From bedaa029b04e43dc01f56bb4ab99c23a81c4291b Mon Sep 17 00:00:00 2001 From: Peter Pan Date: Fri, 5 Jun 2020 15:07:41 +0800 Subject: [PATCH] feat: integrate netron (#651) * chore: update dependencies * feat: high-dimensional chart will fit screen size now * fix: scatter chart cannot be rendered properly * chore: separate api module * intergrate netron * intergrate netron * feat: graphs * style: fix lint * chore: update dependencies * fix: type error * chore: update dependencies * chore: update graph color --- frontend/.eslintrc.js | 4 +- frontend/package.json | 14 +- frontend/packages/cli/package.json | 10 +- frontend/packages/core/components/Aside.tsx | 57 + .../packages/core/components/AsideDivider.tsx | 19 - frontend/packages/core/components/Button.tsx | 42 +- .../packages/core/components/ChartToolbox.tsx | 25 +- frontend/packages/core/components/Content.tsx | 22 +- .../core/components/GraphsPage/Argument.tsx | 118 + .../core/components/GraphsPage/Graph.tsx | 233 + .../components/GraphsPage/GraphSidebar.tsx | 55 + .../GraphsPage/ModelPropertiesDialog.tsx | 88 + .../GraphsPage/NodeDocumentationSidebar.tsx | 187 + .../core/components/GraphsPage/NodeInfo.tsx | 78 - .../GraphsPage/NodePropertiesSidebar.tsx | 28 + .../core/components/GraphsPage/Properties.tsx | 50 + .../core/components/GraphsPage/Property.tsx | 54 + .../core/components/GraphsPage/Search.tsx | 218 + .../core/components/GraphsPage/Uploader.tsx | 116 + .../HighDimensionalChart.tsx | 10 +- frontend/packages/core/components/Icon.tsx | 5 +- frontend/packages/core/components/Input.tsx | 34 +- .../packages/core/components/LineChart.tsx | 33 +- .../packages/core/components/RunAside.tsx | 101 +- .../core/components/RunningToggle.tsx | 1 - .../components/SamplesPage/SampleChart.tsx | 8 +- .../components/ScalarsPage/ScalarChart.tsx | 1 + .../packages/core/components/ScatterChart.tsx | 10 +- .../packages/core/components/SearchInput.tsx | 53 +- frontend/packages/core/components/Select.tsx | 2 +- 
.../packages/core/hooks/useClickOutside.ts | 4 +- frontend/packages/core/hooks/useECharts.ts | 34 +- frontend/packages/core/hooks/useNavItems.ts | 3 +- frontend/packages/core/next.config.js | 2 +- frontend/packages/core/package.json | 33 +- frontend/packages/core/pages/_document.tsx | 14 +- frontend/packages/core/pages/graphs.tsx | 463 +- .../packages/core/pages/high-dimensional.tsx | 101 +- frontend/packages/core/pages/samples.tsx | 52 +- frontend/packages/core/pages/scalars.tsx | 85 +- .../core/public/locales/en/common.json | 10 +- .../core/public/locales/en/graphs.json | 62 +- .../core/public/locales/zh/common.json | 10 +- .../core/public/locales/zh/graphs.json | 62 +- .../core/public/netron/armnn-metadata.json | 476 + .../core/public/netron/armnn-schema.js | 15584 +++++ frontend/packages/core/public/netron/armnn.js | 617 + .../packages/core/public/netron/barracuda.js | 650 + frontend/packages/core/public/netron/base.js | 92 + .../core/public/netron/bigdl-metadata.json | 110 + .../core/public/netron/bigdl-proto.js | 986 + frontend/packages/core/public/netron/bigdl.js | 492 + frontend/packages/core/public/netron/bson.js | 161 + .../core/public/netron/caffe-metadata.json | 525 + .../core/public/netron/caffe-proto.js | 6553 ++ frontend/packages/core/public/netron/caffe.js | 820 + .../core/public/netron/caffe2-metadata.json | 18518 +++++ .../core/public/netron/caffe2-proto.js | 2253 + .../packages/core/public/netron/caffe2.js | 849 + .../packages/core/public/netron/chainer.js | 673 + .../core/public/netron/cntk-metadata.json | 1170 + .../packages/core/public/netron/cntk-proto.js | 532 + frontend/packages/core/public/netron/cntk.js | 1420 + .../core/public/netron/coreml-metadata.json | 513 + .../core/public/netron/coreml-proto.js | 12924 ++++ .../packages/core/public/netron/coreml.js | 1234 + .../core/public/netron/darknet-metadata.json | 561 + .../packages/core/public/netron/darknet.js | 1096 + .../core/public/netron/deps/d3.min.js | 2 + 
.../core/public/netron/deps/dagre.min.js | 3809 + .../core/public/netron/deps/flatbuffers.js | 1259 + .../packages/core/public/netron/deps/long.js | 2 + .../core/public/netron/deps/marked.min.js | 6 + .../core/public/netron/deps/pako.min.js | 1 + .../core/public/netron/deps/protobuf.min.js | 8 + .../core/public/netron/deps/prototxt.js | 455 + .../core/public/netron/dl4j-metadata.json | 128 + frontend/packages/core/public/netron/dl4j.js | 615 + .../core/public/netron/flux-metadata.json | 2 + frontend/packages/core/public/netron/flux.js | 147 + frontend/packages/core/public/netron/gzip.js | 168 + frontend/packages/core/public/netron/hdf5.js | 1431 + .../packages/core/public/netron/index.html | 73 + frontend/packages/core/public/netron/index.js | 468 + .../core/public/netron/keras-metadata.json | 3794 + frontend/packages/core/public/netron/keras.js | 1308 + .../packages/core/public/netron/mediapipe.js | 359 + .../core/public/netron/mlnet-metadata.json | 92 + frontend/packages/core/public/netron/mlnet.js | 2581 + .../core/public/netron/mnn-metadata.json | 862 + .../packages/core/public/netron/mnn-schema.js | 18364 +++++ frontend/packages/core/public/netron/mnn.js | 627 + .../core/public/netron/mxnet-metadata.json | 871 + frontend/packages/core/public/netron/mxnet.js | 1314 + .../core/public/netron/ncnn-metadata.json | 665 + frontend/packages/core/public/netron/ncnn.js | 869 + frontend/packages/core/public/netron/numpy.js | 300 + .../core/public/netron/onnx-metadata.json | 25014 +++++++ .../packages/core/public/netron/onnx-proto.js | 2231 + frontend/packages/core/public/netron/onnx.js | 1167 + .../core/public/netron/openvino-metadata.json | 1535 + .../packages/core/public/netron/openvino.js | 1113 + .../core/public/netron/paddle-metadata.json | 116 + .../core/public/netron/paddle-proto.js | 1029 + .../packages/core/public/netron/paddle.js | 522 + .../packages/core/public/netron/pickle.js | 563 + .../packages/core/public/netron/python.js | 1603 + 
.../core/public/netron/pytorch-metadata.json | 4574 ++ .../packages/core/public/netron/pytorch.js | 3222 + .../core/public/netron/sklearn-metadata.json | 2244 + .../packages/core/public/netron/sklearn.js | 1134 + frontend/packages/core/public/netron/tar.js | 117 + .../core/public/netron/tengine-metadata.json | 1044 + .../packages/core/public/netron/tengine.js | 964 + .../core/public/netron/tf-metadata.json | 58063 ++++++++++++++++ .../packages/core/public/netron/tf-proto.js | 6153 ++ frontend/packages/core/public/netron/tf.js | 1682 + .../core/public/netron/tflite-metadata.json | 750 + .../core/public/netron/tflite-schema.js | 15558 +++++ .../packages/core/public/netron/tflite.js | 1010 + .../core/public/netron/torch-metadata.json | 553 + frontend/packages/core/public/netron/torch.js | 1239 + .../core/public/netron/view-grapher.css | 165 + .../core/public/netron/view-grapher.js | 646 + .../core/public/netron/view-sidebar.js | 800 + frontend/packages/core/public/netron/view.js | 1326 + frontend/packages/core/public/netron/zip.js | 502 + .../core/public/style/fonts/vdl-icon.svg | 35 +- .../core/public/style/fonts/vdl-icon.ttf | Bin 4992 -> 6816 bytes .../core/public/style/fonts/vdl-icon.woff | Bin 5068 -> 6892 bytes .../packages/core/public/style/vdl-icon.css | 21 + .../core/resource/graphs/collectDagFacts.ts | 220 - .../packages/core/resource/graphs/index.ts | 2 - .../packages/core/resource/graphs/types.ts | 96 +- frontend/packages/core/utils/style.ts | 2 + frontend/packages/i18n/package.json | 20 +- .../i18n/src/hocs/app-with-translation.tsx | 1 + .../packages/i18n/src/hocs/with-internals.tsx | 1 + frontend/packages/mock/data/components.ts | 2 +- frontend/packages/mock/package.json | 6 +- frontend/packages/server/ecosystem.config.js | 6 +- frontend/packages/server/package.json | 20 +- frontend/packages/serverless/package.json | 6 +- frontend/yarn.lock | 2598 +- visualdl/server/__init__.py | 4 +- visualdl/server/api.py | 165 + visualdl/server/app.py | 182 +- 
visualdl/server/visualDL.bat | 3 - 148 files changed, 249452 insertions(+), 2577 deletions(-) create mode 100644 frontend/packages/core/components/Aside.tsx delete mode 100644 frontend/packages/core/components/AsideDivider.tsx create mode 100644 frontend/packages/core/components/GraphsPage/Argument.tsx create mode 100644 frontend/packages/core/components/GraphsPage/Graph.tsx create mode 100644 frontend/packages/core/components/GraphsPage/GraphSidebar.tsx create mode 100644 frontend/packages/core/components/GraphsPage/ModelPropertiesDialog.tsx create mode 100644 frontend/packages/core/components/GraphsPage/NodeDocumentationSidebar.tsx delete mode 100644 frontend/packages/core/components/GraphsPage/NodeInfo.tsx create mode 100644 frontend/packages/core/components/GraphsPage/NodePropertiesSidebar.tsx create mode 100644 frontend/packages/core/components/GraphsPage/Properties.tsx create mode 100644 frontend/packages/core/components/GraphsPage/Property.tsx create mode 100644 frontend/packages/core/components/GraphsPage/Search.tsx create mode 100644 frontend/packages/core/components/GraphsPage/Uploader.tsx create mode 100644 frontend/packages/core/public/netron/armnn-metadata.json create mode 100644 frontend/packages/core/public/netron/armnn-schema.js create mode 100644 frontend/packages/core/public/netron/armnn.js create mode 100755 frontend/packages/core/public/netron/barracuda.js create mode 100644 frontend/packages/core/public/netron/base.js create mode 100644 frontend/packages/core/public/netron/bigdl-metadata.json create mode 100644 frontend/packages/core/public/netron/bigdl-proto.js create mode 100644 frontend/packages/core/public/netron/bigdl.js create mode 100644 frontend/packages/core/public/netron/bson.js create mode 100644 frontend/packages/core/public/netron/caffe-metadata.json create mode 100644 frontend/packages/core/public/netron/caffe-proto.js create mode 100644 frontend/packages/core/public/netron/caffe.js create mode 100644 
frontend/packages/core/public/netron/caffe2-metadata.json create mode 100644 frontend/packages/core/public/netron/caffe2-proto.js create mode 100644 frontend/packages/core/public/netron/caffe2.js create mode 100644 frontend/packages/core/public/netron/chainer.js create mode 100644 frontend/packages/core/public/netron/cntk-metadata.json create mode 100644 frontend/packages/core/public/netron/cntk-proto.js create mode 100644 frontend/packages/core/public/netron/cntk.js create mode 100644 frontend/packages/core/public/netron/coreml-metadata.json create mode 100644 frontend/packages/core/public/netron/coreml-proto.js create mode 100644 frontend/packages/core/public/netron/coreml.js create mode 100644 frontend/packages/core/public/netron/darknet-metadata.json create mode 100644 frontend/packages/core/public/netron/darknet.js create mode 100644 frontend/packages/core/public/netron/deps/d3.min.js create mode 100644 frontend/packages/core/public/netron/deps/dagre.min.js create mode 100644 frontend/packages/core/public/netron/deps/flatbuffers.js create mode 100644 frontend/packages/core/public/netron/deps/long.js create mode 100644 frontend/packages/core/public/netron/deps/marked.min.js create mode 100644 frontend/packages/core/public/netron/deps/pako.min.js create mode 100644 frontend/packages/core/public/netron/deps/protobuf.min.js create mode 100644 frontend/packages/core/public/netron/deps/prototxt.js create mode 100644 frontend/packages/core/public/netron/dl4j-metadata.json create mode 100644 frontend/packages/core/public/netron/dl4j.js create mode 100644 frontend/packages/core/public/netron/flux-metadata.json create mode 100644 frontend/packages/core/public/netron/flux.js create mode 100644 frontend/packages/core/public/netron/gzip.js create mode 100755 frontend/packages/core/public/netron/hdf5.js create mode 100644 frontend/packages/core/public/netron/index.html create mode 100644 frontend/packages/core/public/netron/index.js create mode 100644 
frontend/packages/core/public/netron/keras-metadata.json create mode 100644 frontend/packages/core/public/netron/keras.js create mode 100644 frontend/packages/core/public/netron/mediapipe.js create mode 100644 frontend/packages/core/public/netron/mlnet-metadata.json create mode 100644 frontend/packages/core/public/netron/mlnet.js create mode 100644 frontend/packages/core/public/netron/mnn-metadata.json create mode 100644 frontend/packages/core/public/netron/mnn-schema.js create mode 100644 frontend/packages/core/public/netron/mnn.js create mode 100644 frontend/packages/core/public/netron/mxnet-metadata.json create mode 100644 frontend/packages/core/public/netron/mxnet.js create mode 100644 frontend/packages/core/public/netron/ncnn-metadata.json create mode 100644 frontend/packages/core/public/netron/ncnn.js create mode 100644 frontend/packages/core/public/netron/numpy.js create mode 100644 frontend/packages/core/public/netron/onnx-metadata.json create mode 100644 frontend/packages/core/public/netron/onnx-proto.js create mode 100644 frontend/packages/core/public/netron/onnx.js create mode 100644 frontend/packages/core/public/netron/openvino-metadata.json create mode 100644 frontend/packages/core/public/netron/openvino.js create mode 100644 frontend/packages/core/public/netron/paddle-metadata.json create mode 100644 frontend/packages/core/public/netron/paddle-proto.js create mode 100644 frontend/packages/core/public/netron/paddle.js create mode 100644 frontend/packages/core/public/netron/pickle.js create mode 100644 frontend/packages/core/public/netron/python.js create mode 100755 frontend/packages/core/public/netron/pytorch-metadata.json create mode 100644 frontend/packages/core/public/netron/pytorch.js create mode 100644 frontend/packages/core/public/netron/sklearn-metadata.json create mode 100644 frontend/packages/core/public/netron/sklearn.js create mode 100644 frontend/packages/core/public/netron/tar.js create mode 100755 
frontend/packages/core/public/netron/tengine-metadata.json create mode 100755 frontend/packages/core/public/netron/tengine.js create mode 100644 frontend/packages/core/public/netron/tf-metadata.json create mode 100644 frontend/packages/core/public/netron/tf-proto.js create mode 100644 frontend/packages/core/public/netron/tf.js create mode 100644 frontend/packages/core/public/netron/tflite-metadata.json create mode 100644 frontend/packages/core/public/netron/tflite-schema.js create mode 100644 frontend/packages/core/public/netron/tflite.js create mode 100644 frontend/packages/core/public/netron/torch-metadata.json create mode 100644 frontend/packages/core/public/netron/torch.js create mode 100644 frontend/packages/core/public/netron/view-grapher.css create mode 100644 frontend/packages/core/public/netron/view-grapher.js create mode 100644 frontend/packages/core/public/netron/view-sidebar.js create mode 100644 frontend/packages/core/public/netron/view.js create mode 100644 frontend/packages/core/public/netron/zip.js delete mode 100644 frontend/packages/core/resource/graphs/collectDagFacts.ts delete mode 100644 frontend/packages/core/resource/graphs/index.ts create mode 100644 visualdl/server/api.py delete mode 100644 visualdl/server/visualDL.bat diff --git a/frontend/.eslintrc.js b/frontend/.eslintrc.js index 140e51d9..06a215e5 100644 --- a/frontend/.eslintrc.js +++ b/frontend/.eslintrc.js @@ -9,7 +9,7 @@ module.exports = { ecmaVersion: 2018, sourceType: 'module' }, - ignorePatterns: ['node_modules/', 'dist/', 'output/', '_next'], + ignorePatterns: ['node_modules/', 'dist/', 'output/', '_next', 'packages/core/public/netron'], rules: { 'no-console': 'warn', 'sort-imports': 'error' @@ -25,6 +25,7 @@ module.exports = { parser: '@typescript-eslint/parser', rules: { '@typescript-eslint/explicit-function-return-type': 'off', + '@typescript-eslint/explicit-module-boundary-types': 'off', '@typescript-eslint/no-explicit-any': 'error' } }, @@ -52,6 +53,7 @@ module.exports = { 
}, rules: { '@typescript-eslint/explicit-function-return-type': 'off', + '@typescript-eslint/explicit-module-boundary-types': 'off', '@typescript-eslint/no-explicit-any': 'error', 'react/prop-types': 'off', 'react/react-in-jsx-scope': 'off' diff --git a/frontend/package.json b/frontend/package.json index 6fedbabf..21f43c61 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -38,19 +38,19 @@ "version": "yarn format && git add -A" }, "devDependencies": { - "@typescript-eslint/eslint-plugin": "2.31.0", - "@typescript-eslint/parser": "2.31.0", - "eslint": "6.8.0", + "@typescript-eslint/eslint-plugin": "3.1.0", + "@typescript-eslint/parser": "3.1.0", + "eslint": "7.1.0", "eslint-config-prettier": "6.11.0", "eslint-plugin-prettier": "3.1.3", - "eslint-plugin-react": "7.19.0", + "eslint-plugin-react": "7.20.0", "eslint-plugin-react-hooks": "4.0.0", "husky": "4.2.5", - "lerna": "3.20.2", - "lint-staged": "10.2.2", + "lerna": "3.22.0", + "lint-staged": "10.2.8", "prettier": "2.0.5", "rimraf": "3.0.2", - "typescript": "3.8.3", + "typescript": "3.9.3", "yarn": "1.22.4" }, "engines": { diff --git a/frontend/packages/cli/package.json b/frontend/packages/cli/package.json index fc569ddb..d17dfcc5 100644 --- a/frontend/packages/cli/package.json +++ b/frontend/packages/cli/package.json @@ -35,17 +35,17 @@ ], "dependencies": { "@visualdl/server": "2.0.0-beta.43", - "open": "7.0.3", + "open": "7.0.4", "ora": "4.0.4", "pm2": "4.4.0", "yargs": "15.3.1" }, "devDependencies": { - "@types/node": "13.13.5", - "@types/yargs": "15.0.4", + "@types/node": "14.0.10", + "@types/yargs": "15.0.5", "cross-env": "7.0.2", - "ts-node": "8.10.1", - "typescript": "3.8.3" + "ts-node": "8.10.2", + "typescript": "3.9.3" }, "engines": { "node": ">=10", diff --git a/frontend/packages/core/components/Aside.tsx b/frontend/packages/core/components/Aside.tsx new file mode 100644 index 00000000..05129856 --- /dev/null +++ b/frontend/packages/core/components/Aside.tsx @@ -0,0 +1,57 @@ +import React, 
{FunctionComponent} from 'react'; +import {WithStyled, asideWidth, borderColor, rem, size} from '~/utils/style'; + +import styled from 'styled-components'; + +export const AsideSection = styled.section` + margin: ${rem(20)}; + + &:not(:last-child) { + border-bottom: 1px solid ${borderColor}; + padding-bottom: ${rem(20)}; + margin-bottom: 0; + } +`; + +const Wrapper = styled.div<{width?: string | number}>` + ${props => size('100%', props.width == null ? asideWidth : props.width)} + overflow: hidden; + display: flex; + flex-direction: column; + + > .aside-top { + flex: auto; + display: flex; + flex-direction: column; + height: 100%; + overflow: auto; + overflow-x: hidden; + overflow-y: auto; + + > ${AsideSection} { + flex: none; + } + } + + > .aside-bottom { + flex: none; + box-shadow: 0 -${rem(5)} ${rem(16)} 0 rgba(0, 0, 0, 0.03); + padding: ${rem(20)}; + } +`; + +type AsideProps = { + width?: string | number; + bottom?: React.ReactNode; +}; + +const Aside: FunctionComponent = ({width, bottom, className, children}) => { + return ( + +
{children}
+ {bottom &&
{bottom}
} +
+ ); +}; + +export default Aside; diff --git a/frontend/packages/core/components/AsideDivider.tsx b/frontend/packages/core/components/AsideDivider.tsx deleted file mode 100644 index f3f0651c..00000000 --- a/frontend/packages/core/components/AsideDivider.tsx +++ /dev/null @@ -1,19 +0,0 @@ -import React, {FunctionComponent} from 'react'; - -import {rem} from '~/utils/style'; -import styled from 'styled-components'; - -const Divider = styled.hr<{height?: string | number}>` - background-color: transparent; - margin: 0; - border: none; - height: ${({height}) => (height ? ('number' === height ? rem(height) : height) : rem(30))}; -`; - -type AsideDividerProps = { - height?: string | number; -}; - -const AsideDivider: FunctionComponent = ({height}) => ; - -export default AsideDivider; diff --git a/frontend/packages/core/components/Button.tsx b/frontend/packages/core/components/Button.tsx index a25f50a8..b11254a3 100644 --- a/frontend/packages/core/components/Button.tsx +++ b/frontend/packages/core/components/Button.tsx @@ -5,6 +5,7 @@ import { borderColor, borderFocusedColor, borderRadius, + css, dangerActiveColor, dangerColor, dangerFocusedColor, @@ -38,11 +39,26 @@ const colors = { } }; -const Wrapper = styled.a<{type?: keyof typeof colors; rounded?: boolean; disabled?: boolean}>` +const defaultColor = { + default: borderColor, + active: borderActiveColor, + focused: borderFocusedColor +} as const; + +type colorTypes = keyof typeof colors; + +const statusButtonColor: ( + status: 'focused' | 'active' +) => (props: {disabled?: boolean; type?: colorTypes}) => ReturnType = status => ({disabled, type}) => css` + ${disabled || type ? '' : sameBorder({color: defaultColor[status]})} + background-color: ${disabled ? '' : type ? colors[type][status] : 'transparent'}; +`; + +const Wrapper = styled.a<{type?: colorTypes; rounded?: boolean; disabled?: boolean}>` height: ${height}; line-height: ${height}; border-radius: ${props => (props.rounded ? 
half(height) : borderRadius)}; - ${props => (props.type ? '' : sameBorder({color: borderColor}))} + ${props => (props.type ? '' : sameBorder({color: defaultColor.default}))} background-color: ${props => (props.type ? colors[props.type].default : 'transparent')}; color: ${props => (props.disabled ? textLighterColor : props.type ? textInvertColor : textColor)}; cursor: ${props => (props.disabled ? 'not-allowed' : 'pointer')}; @@ -53,20 +69,14 @@ const Wrapper = styled.a<{type?: keyof typeof colors; rounded?: boolean; disable ${transitionProps(['background-color', 'border-color'])} ${ellipsis()} - ${props => - props.disabled - ? '' - : ` - &:hover, - &:focus { - ${props.type ? '' : sameBorder({color: borderFocusedColor})} - background-color: ${props.type ? colors[props.type].focused : 'transparent'}; - } + &:hover, + &:focus { + ${statusButtonColor('focused')} + } - &:active { - ${props.type ? '' : sameBorder({color: borderActiveColor})} - background-color: ${props.type ? colors[props.type].active : 'transparent'}; - }`} + &:active { + ${statusButtonColor('active')} + } `; const Icon = styled(RawIcon)` @@ -76,7 +86,7 @@ const Icon = styled(RawIcon)` type ButtonProps = { rounded?: boolean; icon?: string; - type?: keyof typeof colors; + type?: colorTypes; disabled?: boolean; onClick?: () => unknown; }; diff --git a/frontend/packages/core/components/ChartToolbox.tsx b/frontend/packages/core/components/ChartToolbox.tsx index 31ebc09f..ff784b53 100644 --- a/frontend/packages/core/components/ChartToolbox.tsx +++ b/frontend/packages/core/components/ChartToolbox.tsx @@ -19,15 +19,15 @@ import ReactTooltip from 'react-tooltip'; import {nanoid} from 'nanoid'; import styled from 'styled-components'; -const Toolbox = styled.div` +const Toolbox = styled.div<{reversed?: boolean}>` font-size: ${em(16)}; - height: 1em; line-height: 1; - margin-bottom: ${rem(18)}; display: flex; + flex-direction: ${props => (props.reversed ? 
'row-reverse' : 'row')}; + align-items: center; `; -const ToolboxItem = styled.a<{active?: boolean}>` +const ToolboxItem = styled.a<{active?: boolean; reversed?: boolean}>` cursor: pointer; color: ${props => (props.active ? primaryColor : textLighterColor)}; ${transitionProps('color')} @@ -41,7 +41,7 @@ const ToolboxItem = styled.a<{active?: boolean}>` } & + & { - margin-left: ${rem(14)}; + ${props => `margin-${props.reversed ? 'right' : 'left'}: ${rem(14)};`} } `; @@ -67,9 +67,17 @@ export type ChartTooboxItem = NormalChartToolboxItem | ToggleChartToolboxItem; type ChartToolboxProps = { cid?: string; items: ChartTooboxItem[]; + reversed?: boolean; + tooltipPlace?: 'top' | 'bottom' | 'left' | 'right'; }; -const ChartToolbox: FunctionComponent = ({cid, items, className}) => { +const ChartToolbox: FunctionComponent = ({ + cid, + items, + reversed, + tooltipPlace, + className +}) => { const [activeStatus, setActiveStatus] = useState(new Array(items.length).fill(false)); const onClick = useCallback( (index: number) => { @@ -92,10 +100,11 @@ const ChartToolbox: FunctionComponent = ({cid, i return ( <> - + {items.map((item, index) => ( onClick(index)} data-for={item.tooltip ? id : null} @@ -113,7 +122,7 @@ const ChartToolbox: FunctionComponent = ({cid, i ` - margin: ${margin}; - margin-right: ${props => (props.aside ? math(`${margin} + ${asideWidth}`) : margin)}; - min-height: calc(100vh - ${math(`${margin} * 2 + ${headerHeight}`)}); +const Article = styled.article` + flex: auto; + margin: ${contentMargin}; + min-height: ${contentHeight}; `; const Aside = styled.aside` + flex: none; background-color: ${backgroundColor}; - ${size(`calc(100vh - ${headerHeight})`, asideWidth)} - ${position('fixed', headerHeight, 0, null, null)} + height: ${`calc(100vh - ${headerHeight})`}; + ${position('sticky', headerHeight, 0, null, null)} overflow-x: hidden; overflow-y: auto; `; @@ -43,7 +41,7 @@ type ContentProps = { const Content: FunctionComponent = ({children, aside, loading}) => (
-
{children}
+
{children}
{aside && } {loading && ( diff --git a/frontend/packages/core/components/GraphsPage/Argument.tsx b/frontend/packages/core/components/GraphsPage/Argument.tsx new file mode 100644 index 00000000..892f379a --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/Argument.tsx @@ -0,0 +1,118 @@ +import {Argument as ArgumentType, Property as PropertyType} from '~/resource/graphs/types'; +import React, {FunctionComponent, useMemo, useState} from 'react'; +import {borderColor, em, sameBorder, textLightColor, textLighterColor} from '~/utils/style'; + +import Icon from '~/components/Icon'; +import styled from 'styled-components'; + +const Wrapper = styled.div` + ${sameBorder({radius: true})} + + & + & { + margin-top: ${em(10)}; + } + + > .argument-row { + display: flex; + align-items: center; + justify-content: space-between; + padding: ${em(8)} ${em(10)}; + line-height: 1.5; + + > .argument-text { + flex: auto; + overflow: hidden; + word-break: break-all; + } + + > .argument-raw { + overflow: auto; + width: 100%; + + pre { + margin: 0; + } + } + + > .argument-operation { + flex: none; + cursor: pointer; + font-size: ${em(14)}; + margin-left: ${em(10)}; + color: ${textLighterColor}; + + &:hover, + &:active { + color: ${textLightColor}; + } + } + + &:not(:first-child) { + border-top: 1px solid ${borderColor}; + } + } +`; + +type ArgumentProps = { + value: ArgumentType | PropertyType; + expand?: boolean; + showNodeDodumentation?: () => unknown; +}; + +const Argument: FunctionComponent = ({value, expand, showNodeDodumentation}) => { + const [expanded, setExpanded] = useState(expand ?? false); + + const expandable = useMemo(() => { + const argument = value as ArgumentType; + return !!(argument.children && argument.children.length); + }, [value]); + + return ( + +
+ + {value.name ? ( + <> + {value.name}: {value.value} + + ) : ( + value.value.split('\n').map((line, index) => ( + + {index !== 0 &&
} + {line} +
+ )) + )} +
+ {(value as PropertyType).documentation && ( + showNodeDodumentation?.()}> + + + )} + {expandable && ( + setExpanded(e => !e)}> + + + )} +
+ {expandable && + expanded && + (value as ArgumentType)?.children?.map((item, index) => ( +
+ {item.type === 'raw' ? ( + +
{item.value}
+
+ ) : ( + + {item.name ? `${item.name}: ` : ''} + {item.type === 'code' ? {item.value} : item.value} + + )} +
+ ))} +
+ ); +}; + +export default Argument; diff --git a/frontend/packages/core/components/GraphsPage/Graph.tsx b/frontend/packages/core/components/GraphsPage/Graph.tsx new file mode 100644 index 00000000..9dbe5804 --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/Graph.tsx @@ -0,0 +1,233 @@ +import {Documentation, Properties, SearchItem, SearchResult} from '~/resource/graphs/types'; +import React, {useCallback, useEffect, useImperativeHandle, useMemo, useRef, useState} from 'react'; +import {backgroundColor, borderColor, contentHeight, primaryColor, rem, size} from '~/utils/style'; + +import ChartToolbox from '~/components/ChartToolbox'; +import HashLoader from 'react-spinners/HashLoader'; +import styled from 'styled-components'; +import {useTranslation} from '~/utils/i18n'; + +const toolboxHeight = rem(40); + +const Wrapper = styled.div` + position: relative; + height: ${contentHeight}; + background-color: ${backgroundColor}; + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; +`; + +const RenderContent = styled.div<{show: boolean}>` + position: absolute; + top: 0; + left: 0; + ${size('100%', '100%')} + opacity: ${props => (props.show ? 1 : 0)}; + z-index: ${props => (props.show ? 0 : -1)}; + pointer-events: ${props => (props.show ? 
'auto' : 'none')}; +`; + +const Toolbox = styled(ChartToolbox)` + height: ${toolboxHeight}; + border-bottom: 1px solid ${borderColor}; + padding: 0 ${rem(20)}; +`; + +const Content = styled.div` + height: calc(100% - ${toolboxHeight}); + + > iframe { + ${size('100%', '100%')} + border: none; + } +`; + +const Loading = styled.div` + ${size('100%', '100%')} + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; + overscroll-behavior: none; + cursor: progress; + font-size: ${rem(16)}; + line-height: ${rem(60)}; +`; + +export type GraphRef = { + export(type: 'svg' | 'png'): void; + search(value: string): void; + select(item: SearchItem): void; + showModelProperties(): void; + showNodeDocumentation: (data: Properties) => void; +}; + +type GraphProps = { + files: FileList | null; + uploader: JSX.Element; + showAttributes: boolean; + showInitializers: boolean; + showNames: boolean; + onRendered?: () => unknown; + onSearch?: (data: SearchResult) => unknown; + onShowModelProperties?: (data: Properties) => unknown; + onShowNodeProperties?: (data: Properties) => unknown; + onShowNodeDocumentation?: (data: Documentation) => unknown; +}; + +const Graph = React.forwardRef( + ( + { + files, + uploader, + showAttributes, + showInitializers, + showNames, + onRendered, + onSearch, + onShowModelProperties, + onShowNodeProperties, + onShowNodeDocumentation + }, + ref + ) => { + const {t} = useTranslation('graphs'); + + const [ready, setReady] = useState(false); + const [loading, setLoading] = useState(false); + const [rendered, setRendered] = useState(false); + + const iframe = useRef(null); + const handler = useCallback( + (event: MessageEvent) => { + if (event.data) { + const {type, data} = event.data; + switch (type) { + case 'status': + switch (data) { + case 'ready': + return setReady(true); + case 'loading': + return setLoading(true); + case 'rendered': + setLoading(false); + setRendered(true); + onRendered?.(); + return; + } + return; + 
case 'search': + return onSearch?.(data); + case 'show-model-properties': + return onShowModelProperties?.(data); + case 'show-node-properties': + return onShowNodeProperties?.(data); + case 'show-node-documentation': + return onShowNodeDocumentation?.(data); + } + } + }, + [onRendered, onSearch, onShowModelProperties, onShowNodeProperties, onShowNodeDocumentation] + ); + useEffect(() => { + if (process.browser) { + window.addEventListener('message', handler); + return () => window.removeEventListener('message', handler); + } + }, [handler]); + + const dispatch = useCallback((type: string, data?: unknown) => { + if (process.browser) { + iframe.current?.contentWindow?.postMessage( + { + type, + data + }, + `${window.location.protocol}//${window.location.host}` + ); + } + }, []); + + useEffect(() => dispatch('change-files', files), [dispatch, files]); + useEffect(() => dispatch('toggle-attributes', showAttributes), [dispatch, showAttributes]); + useEffect(() => dispatch('toggle-initializers', showInitializers), [dispatch, showInitializers]); + useEffect(() => dispatch('toggle-names', showNames), [dispatch, showNames]); + + useImperativeHandle(ref, () => ({ + export(type) { + dispatch('export', type); + }, + search(value) { + dispatch('search', value); + }, + select(item) { + dispatch('select', item); + }, + showModelProperties() { + dispatch('show-model-properties'); + }, + showNodeDocumentation(data) { + dispatch('show-node-documentation', data); + } + })); + + const content = useMemo(() => { + if (!ready || loading) { + return ( + + + + ); + } + if (ready && !rendered) { + return uploader; + } + return null; + }, [ready, loading, rendered, uploader]); + + return ( + + {content} + + dispatch('zoom-reset') + }, + { + icon: 'zoom-out', + tooltip: t('graphs:zoom-out'), + onClick: () => dispatch('zoom-out') + }, + { + icon: 'zoom-in', + tooltip: t('graphs:zoom-in'), + onClick: () => dispatch('zoom-in') + } + ]} + reversed + tooltipPlace="bottom" + /> + + + + + + ); + } 
+); + +Graph.displayName = 'Graph'; + +export default Graph; diff --git a/frontend/packages/core/components/GraphsPage/GraphSidebar.tsx b/frontend/packages/core/components/GraphsPage/GraphSidebar.tsx new file mode 100644 index 00000000..799f13a8 --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/GraphSidebar.tsx @@ -0,0 +1,55 @@ +import React, {FunctionComponent} from 'react'; +import {backgroundColor, borderColor, rem, textLightColor} from '~/utils/style'; + +import styled from 'styled-components'; +import {useTranslation} from '~/utils/i18n'; + +const Sidebar = styled.div` + height: 100%; + background-color: ${backgroundColor}; +`; + +const Title = styled.div` + height: ${rem(60)}; + font-size: ${rem(16)}; + display: flex; + justify-content: space-between; + align-items: center; + border-bottom: 1px solid ${borderColor}; + margin: 0 ${rem(20)}; + + > .close { + flex: none; + color: ${textLightColor}; + cursor: pointer; + } +`; + +const Content = styled.div` + padding: ${rem(20)}; + height: calc(100% - ${rem(60)}); + overflow: auto; +`; + +type GraphSidebarProps = { + title: string; + onClose?: () => unknown; +}; + +const GraphSidebar: FunctionComponent = ({title, onClose, children}) => { + const {t} = useTranslation('common'); + + return ( + + + <span>{title}</span> + <a className="close" onClick={() => onClose?.()}> + {t('common:close')} + </a> + + {children} + + ); +}; + +export default GraphSidebar; diff --git a/frontend/packages/core/components/GraphsPage/ModelPropertiesDialog.tsx b/frontend/packages/core/components/GraphsPage/ModelPropertiesDialog.tsx new file mode 100644 index 00000000..4aa5dac8 --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/ModelPropertiesDialog.tsx @@ -0,0 +1,88 @@ +import React, {FunctionComponent} from 'react'; +import {backgroundColor, em, size} from '~/utils/style'; + +import Icon from '~/components/Icon'; +import Properties from '~/components/GraphsPage/Properties'; +import {Properties as 
PropertiesType} from '~/resource/graphs/types'; +import styled from 'styled-components'; +import {useTranslation} from '~/utils/i18n'; + +const Dialog = styled.div` + position: fixed; + top: 0; + left: 0; + width: 100vw; + height: 100vh; + overscroll-behavior: none; + background-color: rgba(255, 255, 255, 0.8); + z-index: 999; + + > .modal { + width: ${em(536)}; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + box-shadow: 0 2px 20px 0 rgba(0, 0, 0, 0.08); + + > .modal-header { + padding: 0 ${em(40, 18)}; + height: ${em(47, 18)}; + background-color: #eee; + display: flex; + justify-content: space-between; + align-items: center; + font-size: ${em(18)}; + + > .modal-title { + flex: auto; + } + + > .modal-close { + flex: none; + ${size(em(14, 18), em(14, 18))} + font-size: ${em(14, 18)}; + text-align: center; + cursor: pointer; + } + } + + > .modal-body { + padding: ${em(40)}; + background-color: ${backgroundColor}; + overflow: auto; + max-height: calc(80vh - ${em(47)}); + } + } +`; + +type ModelPropertiesDialogProps = { + data?: PropertiesType | null; + onClose?: () => unknown; +}; + +const ModelPropertiesDialog: FunctionComponent = ({data, onClose}) => { + const {t} = useTranslation('graphs'); + + if (!data) { + return null; + } + + return ( + +
+
+ {t('graphs:model-properties')} + onClose?.()}> + + +
+
+ +
+
+
+ ); +}; + +export default ModelPropertiesDialog; diff --git a/frontend/packages/core/components/GraphsPage/NodeDocumentationSidebar.tsx b/frontend/packages/core/components/GraphsPage/NodeDocumentationSidebar.tsx new file mode 100644 index 00000000..7dfb96be --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/NodeDocumentationSidebar.tsx @@ -0,0 +1,187 @@ +import React, {FunctionComponent, useCallback} from 'react'; +import {Trans, useTranslation} from '~/utils/i18n'; +import {borderRadius, em, textLightColor} from '~/utils/style'; + +import {Documentation as DocumentationType} from '~/resource/graphs/types'; +import GraphSidebar from '~/components/GraphsPage/GraphSidebar'; +import styled from 'styled-components'; + +const Documentation = styled.div` + overflow: hidden; + word-break: break-word; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe WPC', 'Segoe UI', 'Ubuntu', 'Droid Sans', sans-serif; + + h1 { + font-size: ${em(18)}; + margin: ${em(10)} 0; + } + + h2 { + font-size: ${em(16)}; + margin: ${em(10)} 0; + } + + h3 { + font-size: ${em(14)}; + margin: ${em(10)} 0; + } + + p { + line-height: 1.5; + margin: ${em(10)} 0; + } + + dl { + line-height: 1.5; + margin: ${em(10)} 0; + + > dt { + font-weight: 700; + } + + > dd { + margin-left: ${em(20)}; + } + } + + pre { + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace; + background-color: rgba(216, 216, 216, 0.5); + color: ${textLightColor}; + padding: ${em(10)}; + border-radius: ${borderRadius}; + overflow: auto; + + code { + background-color: transparent; + padding: 0; + border-radius: 0; + } + } + + code { + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace; + background-color: rgba(216, 216, 216, 0.5); + color: ${textLightColor}; + padding: ${em(2)} ${em(4)}; + border-radius: ${em(2)}; + } +`; + +type NodeDocumentationSidebarProps = { + data?: DocumentationType | null; + onClose?: () => unknown; +}; + +const 
NodeDocumentationSidebar: FunctionComponent = ({data, onClose}) => { + const {t} = useTranslation('graphs'); + + const list = useCallback( + (items: {name: string; type?: string | string[]; description: string}[]) => + items.map((item, index) => ( +
+
+ {item.name} + {item.type && ( + <> + :{' '} + {'string' === typeof item.type ? ( + {item.type} + ) : ( + item.type.map((i, j) => ( + + {j ? ',' : null} + {i} + + )) + )} + + )} +
+
+
+ )), + [] + ); + + return ( + + +

{data?.name}

+ {data?.summary &&

} + {data?.description &&

} + {data?.attributes && ( + <> +

{t('graphs:documentation.attributes')}

+ {list(data.attributes)} + + )} + {data?.inputs && ( + <> +

+ {t('graphs:documentation.inputs')} + {data?.inputs_range && ` (${data.inputs_range.replace(/∞/g, '∞')})`} +

+ {list(data.inputs)} + + )} + {data?.outputs && ( + <> +

+ {t('graphs:documentation.outputs')} + {data?.outputs_range && ` (${data.outputs_range.replace(/∞/g, '∞')})`} +

+ {list(data.outputs)} + + )} + {data?.type_constraints && ( + <> +

{t('graphs:documentation.type-constraints')}

+ {list( + data.type_constraints.map(({type_param_str, allowed_type_strs, description}) => ({ + name: type_param_str, + type: allowed_type_strs, + description + })) + )} + + )} + {data?.examples && ( + <> +

{t('graphs:documentation.examples')}

+ {data.examples.map((example, index) => ( + +

{example.summary}

+
{example.code}
+
+ ))} + + )} + {data?.references && ( + <> +

{t('graphs:documentation.references')}

+
    + {data.references.map((reference, index) => ( +
  • + ))} +
+ + )} + {data && data.domain && data.since_version && data.support_level && ( + <> +

{t('graphs:documentation.support')}

+
+ {/* eslint-disable prettier/prettier */} + + In domain {{domain: data.domain}} since version {{since_version: data.since_version}} at support level {{support_level: data.support_level}}. + + {/* eslint-enable prettier/prettier */} +
+ + )} +
+
+ ); +}; + +export default NodeDocumentationSidebar; diff --git a/frontend/packages/core/components/GraphsPage/NodeInfo.tsx b/frontend/packages/core/components/GraphsPage/NodeInfo.tsx deleted file mode 100644 index 67553fc1..00000000 --- a/frontend/packages/core/components/GraphsPage/NodeInfo.tsx +++ /dev/null @@ -1,78 +0,0 @@ -import {NodeType, TypedNode} from '~/resource/graphs'; -import React, {FunctionComponent} from 'react'; -import {WithStyled, textLightColor} from '~/utils/style'; - -import styled from 'styled-components'; -import {useTranslation} from '~/utils/i18n'; - -const typeName: {[k in NodeType]: string} = { - [NodeType.Input]: 'input', - [NodeType.Output]: 'output', - [NodeType.Op]: 'operator' -}; - -export interface NodeInfoProps { - node?: TypedNode | {type: 'unknown'; guessType: NodeType; msg: string}; -} - -const DataList: FunctionComponent<{items: {key: string; value: string | string[]}[]} & WithStyled> = props => { - return ( -
    - {props.items.map(({key, value}) => ( -
  • - {key}: {value} -
  • - ))} -
- ); -}; - -const PropertyList = styled(DataList)` - padding: 0; - list-style: none; - color: ${textLightColor}; - - li + li { - margin-top: 1em; - } -`; - -const NodeInfo: FunctionComponent = props => { - const {t} = useTranslation('graphs'); - if (!props.node) { - return

{t('graphs:click-node')}

; - } - - const node = props.node; - switch (node.type) { - case NodeType.Input: - case NodeType.Output: - return ( - - ); - case NodeType.Op: - return ( - - ); - case 'unknown': - return ; - default: - return null; - } -}; - -export default NodeInfo; diff --git a/frontend/packages/core/components/GraphsPage/NodePropertiesSidebar.tsx b/frontend/packages/core/components/GraphsPage/NodePropertiesSidebar.tsx new file mode 100644 index 00000000..a6fab497 --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/NodePropertiesSidebar.tsx @@ -0,0 +1,28 @@ +import React, {FunctionComponent} from 'react'; + +import GraphSidebar from '~/components/GraphsPage/GraphSidebar'; +import Properties from '~/components/GraphsPage/Properties'; +import {Properties as PropertiesType} from '~/resource/graphs/types'; +import {useTranslation} from '~/utils/i18n'; + +type NodePropertiesSidebarProps = { + data?: PropertiesType | null; + onClose?: () => unknown; + showNodeDodumentation?: () => unknown; +}; + +const NodePropertiesSidebar: FunctionComponent = ({ + data, + onClose, + showNodeDodumentation +}) => { + const {t} = useTranslation('graphs'); + + return ( + + + + ); +}; + +export default NodePropertiesSidebar; diff --git a/frontend/packages/core/components/GraphsPage/Properties.tsx b/frontend/packages/core/components/GraphsPage/Properties.tsx new file mode 100644 index 00000000..a8d00a1c --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/Properties.tsx @@ -0,0 +1,50 @@ +import React, {FunctionComponent} from 'react'; + +import {Properties as PropertiesType} from '~/resource/graphs/types'; +import Property from '~/components/GraphsPage/Property'; +import {em} from '~/utils/style'; +import styled from 'styled-components'; +import {useTranslation} from '~/utils/i18n'; + +const Header = styled.div` + font-size: ${em(16)}; + font-weight: 700; + padding: ${em(10)} 0; +`; + +type PropertiesProps = PropertiesType & { + expand?: boolean; + showNodeDodumentation?: () => 
unknown; +}; + +const Properties: FunctionComponent = ({properties, groups, expand, showNodeDodumentation}) => { + const {t} = useTranslation('graphs'); + + return ( + <> + {properties?.map((property, index) => ( + + ))} + {groups?.map((group, index) => ( + +
{t(`graphs:properties.${group.name}`)}
+ {group.properties?.map((property, anotherIndex) => ( + + ))} +
+ ))} + + ); +}; + +export default Properties; diff --git a/frontend/packages/core/components/GraphsPage/Property.tsx b/frontend/packages/core/components/GraphsPage/Property.tsx new file mode 100644 index 00000000..c462507f --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/Property.tsx @@ -0,0 +1,54 @@ +import {Argument as ArgumentType, NameValues, Property as PropertyType} from '~/resource/graphs/types'; +import React, {FunctionComponent} from 'react'; +import {ellipsis, em, sameBorder} from '~/utils/style'; + +import Argument from '~/components/GraphsPage/Argument'; +import styled from 'styled-components'; + +const Wrapper = styled.div` + display: flex; + align-items: top; + justify-content: space-between; + width: 100%; + + > .property-name { + flex: none; + text-align: right; + width: ${em(80)}; + padding: ${em(8)} 0; + ${sameBorder({color: 'transparent'})} + ${ellipsis()} + } + + > .property-values { + flex: auto; + width: calc(100% - ${em(90)}); + margin-left: ${em(10)}; + } + + & + & { + margin-top: ${em(10)}; + } +`; + +type PropertyProps = NameValues & { + expand?: boolean; + showNodeDodumentation?: () => unknown; +}; + +const Property: FunctionComponent = ({name, values, expand, showNodeDodumentation}) => { + return ( + + +
+ {values.map((value, index) => ( + + ))} +
+
+ ); +}; + +export default Property; diff --git a/frontend/packages/core/components/GraphsPage/Search.tsx b/frontend/packages/core/components/GraphsPage/Search.tsx new file mode 100644 index 00000000..5d376145 --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/Search.tsx @@ -0,0 +1,218 @@ +import React, {FunctionComponent, useCallback, useEffect, useState} from 'react'; +import {SearchItem, SearchResult} from '~/resource/graphs/types'; +import { + backgroundColor, + backgroundFocusedColor, + css, + ellipsis, + em, + primaryColor, + rem, + sameBorder, + size, + textLightColor, + transitionProps, + triangle +} from '~/utils/style'; + +import Field from '~/components/Field'; +import SearchInput from '~/components/SearchInput'; +import styled from 'styled-components'; +import useSearchValue from '~/hooks/useSearchValue'; +import {useTranslation} from '~/utils/i18n'; + +const SearchField = styled(Field)` + margin-bottom: ${rem(20)}; + display: flex; + justify-content: space-between; + align-items: center; + + > :first-child { + flex: auto; + } + + > a:last-child { + color: ${primaryColor}; + cursor: pointer; + margin-left: ${rem(10)}; + flex: none; + } +`; + +const Empty = styled.div` + padding: ${rem(100)} 0; + text-align: center; + color: ${textLightColor}; +`; + +const Wrapper = styled.div` + overflow: auto; +`; + +const List = styled.ul` + list-style: none; + margin: 0; + padding: 0; +`; + +const Item = styled.li` + padding: ${em(10)} ${em(12)}; + cursor: pointer; + width: 100%; + background-color: ${backgroundColor}; + display: flex; + align-items: center; + ${transitionProps('background-color')} + + > span { + flex: auto; + margin-left: ${em(10)}; + ${ellipsis()} + } + + &:hover { + background-color: ${backgroundFocusedColor}; + } +`; + +const icon = css` + color: #979797; + flex: none; + width: ${em(8)}; +`; + +const EdgeIcon = styled.i` + ${icon} + position: relative; + height: ${em(11)}; + overflow: hidden; + + &::before { + content: ''; + 
display: block; + ${size(em(6), em(1))} + background-color: currentColor; + position: absolute; + top: 0; + left: 50%; + transform: translateX(-50%); + } + + &::after { + content: ''; + display: block; + ${triangle({ + pointingDirection: 'bottom', + height: em(5), + width: em(7), + foregroundColor: 'currentColor' + })} + position: absolute; + top: ${em(6)}; + left: 50%; + transform: translateX(-50%); + } +`; + +const NodeIcon = styled.i` + ${icon} + height: ${em(7)}; + ${sameBorder({radius: em(2), color: 'currentColor'})} + background-color: #f7f7f7; +`; + +const InitializerIcon = styled.i` + ${icon} + height: ${em(8)}; + ${sameBorder({radius: em(4), color: 'currentColor'})} + background-color: #f7f7f7; +`; + +const icons = { + input: EdgeIcon, + output: EdgeIcon, + node: NodeIcon, + initializer: InitializerIcon +} as const; + +type SearchProps = { + text?: string; + data: SearchResult; + onChange?: (value: string) => unknown; + onSelect?: (item: SearchItem) => unknown; + onActive?: () => unknown; + onDeactive?: () => unknown; +}; + +const Search: FunctionComponent = ({text, data, onChange, onSelect, onActive, onDeactive}) => { + const {t} = useTranslation(['graphs', 'common']); + + const [search, setSearch] = useState(text ?? ''); + const [searching, setSearching] = useState(false); + const [searchResult, setSearchResult] = useState(data.result); + const debouncedSearchText = useSearchValue(search); + useEffect(() => setSearch(text ?? 
''), [text]); + useEffect(() => { + if (searching) { + onChange?.(debouncedSearchText); + } else { + setSearchResult([]); + } + }, [debouncedSearchText, searching, onChange]); + useEffect(() => { + if (data.text === search) { + setSearchResult(data.result); + } + }, [data, search]); + + const focus = useCallback(() => { + setSearching(true); + onActive?.(); + }, [onActive]); + + const cancel = useCallback(() => { + setSearch(''); + onChange?.(''); + setSearching(false); + onDeactive?.(); + }, [onChange, onDeactive]); + + const select = useCallback( + (item: SearchItem) => { + setSearch(item.name); + onSelect?.(item); + setSearching(false); + onDeactive?.(); + }, + [onSelect, onDeactive] + ); + + return ( + <> + + + {searching && {t('common:cancel')}} + + {searching && + (searchResult.length ? ( + + + {searchResult.map(item => { + const Icon = icons[item.type]; + return ( + select(item)} title={item.name}> + + {item.name} + + ); + })} + + + ) : ( + {t('graphs:nothing-matched')} + ))} + + ); +}; + +export default Search; diff --git a/frontend/packages/core/components/GraphsPage/Uploader.tsx b/frontend/packages/core/components/GraphsPage/Uploader.tsx new file mode 100644 index 00000000..425cf02b --- /dev/null +++ b/frontend/packages/core/components/GraphsPage/Uploader.tsx @@ -0,0 +1,116 @@ +import React, {FunctionComponent, useCallback, useState} from 'react'; +import {em, primaryColor, sameBorder, size, textLightColor} from '~/utils/style'; + +import Button from '~/components/Button'; +import Icon from '~/components/Icon'; +import styled from 'styled-components'; +import {useTranslation} from '~/utils/i18n'; + +const DropZone = styled.div<{actived: boolean}>` + ${props => + sameBorder({ + width: '1px', + type: 'dashed', + radius: em(16), + color: props.actived ? primaryColor : undefined + })} + background-color: ${props => (props.actived ? 
'#f2f6ff' : '#f9f9f9')}; + ${size('43.2%', '68%')} + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; + + > .upload-icon { + font-size: ${em(64)}; + color: ${primaryColor}; + } + + > span { + font-size: ${em(18)}; + line-height: 3.2; + } + + > .upload-button { + min-width: ${em(155)}; + } +`; + +const SupportTable = styled.table` + max-width: 68%; + margin-top: ${em(32)}; + + td { + vertical-align: text-top; + line-height: 2; + + &:first-of-type { + color: ${textLightColor}; + text-align: right; + padding-right: ${em(10)}; + font-size: ${em(16)}; + width: ${em(250)}; + } + } +`; + +type UploaderProps = { + onClickUpload?: () => unknown; + onDropFiles?: (files: FileList) => unknown; +}; + +const Uploader: FunctionComponent = ({onClickUpload, onDropFiles}) => { + const {t} = useTranslation('graphs'); + + const [actived, setActived] = useState(false); + const onClick = useCallback(() => onClickUpload?.(), [onClickUpload]); + const onDrop = useCallback( + (e: React.DragEvent) => { + e.preventDefault(); + setActived(false); + if (e.dataTransfer && e.dataTransfer.files && e.dataTransfer.files.length) { + onDropFiles?.(e.dataTransfer.files); + } + }, + [onDropFiles] + ); + const onDragLeave = useCallback((e: React.DragEvent) => { + e.preventDefault(); + if (e.currentTarget.contains(e.relatedTarget as Node | null)) { + return; + } + setActived(false); + }, []); + + return ( + <> + e.preventDefault()} + onDragEnter={() => setActived(true)} + onDragLeave={onDragLeave} + > + + {t('upload-tip')} + + + + + + {t('supported-model')} + {t('supported-model-list')} + + + {t('experimental-supported-model')} + {t('experimental-supported-model-list')} + + + + + ); +}; + +export default Uploader; diff --git a/frontend/packages/core/components/HighDimensionalPage/HighDimensionalChart.tsx b/frontend/packages/core/components/HighDimensionalPage/HighDimensionalChart.tsx index 9b2e841a..dc7ce189 100644 --- 
a/frontend/packages/core/components/HighDimensionalPage/HighDimensionalChart.tsx +++ b/frontend/packages/core/components/HighDimensionalPage/HighDimensionalChart.tsx @@ -1,6 +1,6 @@ import {Dimension, DivideParams, Point, Reduction, divide} from '~/resource/high-dimensional'; import React, {FunctionComponent, useMemo} from 'react'; -import {primaryColor, rem} from '~/utils/style'; +import {contentHeight, primaryColor, rem} from '~/utils/style'; import ScatterChart from '~/components/ScatterChart'; import queryString from 'query-string'; @@ -9,11 +9,7 @@ import useHeavyWork from '~/hooks/useHeavyWork'; import {useRunningRequest} from '~/hooks/useRequest'; import {useTranslation} from '~/utils/i18n'; -const height = rem(600); - const divideWasm = () => - // eslint-disable-next-line @typescript-eslint/ban-ts-ignore - // @ts-ignore import('@visualdl/wasm').then(({divide}) => (params: DivideParams) => (divide(params.points, params.labels, !!params.visibility, params.keyword ?? '') as unknown) as [ Point[], @@ -23,7 +19,7 @@ const divideWasm = () => const divideWorker = () => new Worker('~/worker/high-dimensional/divide.worker.ts', {type: 'module'}); const StyledScatterChart = styled(ScatterChart)` - height: ${height}; + height: ${contentHeight}; `; const Empty = styled.div` @@ -31,7 +27,7 @@ const Empty = styled.div` justify-content: center; align-items: center; font-size: ${rem(20)}; - height: ${height}; + height: ${contentHeight}; `; const label = { diff --git a/frontend/packages/core/components/Icon.tsx b/frontend/packages/core/components/Icon.tsx index 8f3fff15..f396cbca 100644 --- a/frontend/packages/core/components/Icon.tsx +++ b/frontend/packages/core/components/Icon.tsx @@ -4,10 +4,11 @@ import {WithStyled} from '~/utils/style'; type IconProps = { type: string; + onClick?: () => unknown; }; -const Icon: FunctionComponent = ({type, className}) => { - return ; +const Icon: FunctionComponent = ({type, onClick, className}) => { + return onClick?.()} />; }; export 
default Icon; diff --git a/frontend/packages/core/components/Input.tsx b/frontend/packages/core/components/Input.tsx index 83b8a6ac..8fcf1437 100644 --- a/frontend/packages/core/components/Input.tsx +++ b/frontend/packages/core/components/Input.tsx @@ -1,6 +1,6 @@ -import React, {FunctionComponent} from 'react'; import {WithStyled, borderFocusedColor, em, half, sameBorder, textLighterColor, transitionProps} from '~/utils/style'; +import React from 'react'; import styled from 'styled-components'; export const padding = em(10); @@ -25,25 +25,29 @@ const StyledInput = styled.input<{rounded?: boolean}>` } `; -export type InputProps = { +type CustomeInputProps = { rounded?: boolean; - placeholder?: string; value?: string; onChange?: (value: string) => unknown; }; -const Input: FunctionComponent< - InputProps & WithStyled & Omit, keyof InputProps> -> = ({rounded, placeholder, value, onChange, className, ...props}) => ( - onChange?.(e.target.value)} - {...props} - /> +export type InputProps = Omit, keyof CustomeInputProps | 'type' | 'className'> & + CustomeInputProps; + +const Input = React.forwardRef( + ({rounded, value, onChange, className, ...props}, ref) => ( + onChange?.(e.target.value)} + {...props} + /> + ) ); +Input.displayName = 'Input'; + export default Input; diff --git a/frontend/packages/core/components/LineChart.tsx b/frontend/packages/core/components/LineChart.tsx index bbb67360..f90d5f80 100644 --- a/frontend/packages/core/components/LineChart.tsx +++ b/frontend/packages/core/components/LineChart.tsx @@ -1,6 +1,6 @@ import * as chart from '~/utils/chart'; -import React, {useCallback, useEffect, useImperativeHandle, useLayoutEffect, useRef} from 'react'; +import React, {useCallback, useEffect, useImperativeHandle} from 'react'; import {WithStyled, position, primaryColor, size} from '~/utils/style'; import {EChartOption} from 'echarts'; @@ -56,22 +56,21 @@ const LineChart = React.forwardRef( ({title, legend, data, xAxis, yAxis, xType, yType, xRange, yRange, 
tooltip, loading, className}, ref) => { const {i18n} = useTranslation(); - const {ref: echartRef, echart} = useECharts({ + const {ref: echartRef, echart, wrapper} = useECharts({ loading: !!loading, - zoom: true + zoom: true, + autoFit: true }); useImperativeHandle(ref, () => ({ restore: () => { - echart?.current?.dispatchAction({ + echart?.dispatchAction({ type: 'restore' }); }, saveAsImage: () => { - if (echart?.current) { - const blob = dataURL2Blob( - echart.current.getDataURL({type: 'png', pixelRatio: 2, backgroundColor: '#FFF'}) - ); + if (echart) { + const blob = dataURL2Blob(echart.getDataURL({type: 'png', pixelRatio: 2, backgroundColor: '#FFF'})); saveAs(blob, `${title?.replace(/[/\\?%*:|"<>]/g, '_') || 'scalar'}.png`); } } @@ -84,7 +83,7 @@ const LineChart = React.forwardRef( useEffect(() => { if (process.browser) { - echart?.current?.setOption( + echart?.setOption( { color: chart.color, title: { @@ -133,22 +132,8 @@ const LineChart = React.forwardRef( } }, [data, title, legend, xAxis, yAxis, xType, yType, xAxisFormatter, xRange, yRange, tooltip, echart]); - const wrapperRef = useRef(null); - useLayoutEffect(() => { - if (process.browser) { - const wrapper = wrapperRef.current; - if (wrapper) { - const observer = new ResizeObserver(() => { - echart?.current?.resize(); - }); - observer.observe(wrapper); - return () => observer.unobserve(wrapper); - } - } - }); - return ( - + {!echart && (
diff --git a/frontend/packages/core/components/RunAside.tsx b/frontend/packages/core/components/RunAside.tsx index 8cee8b0d..7f3714ef 100644 --- a/frontend/packages/core/components/RunAside.tsx +++ b/frontend/packages/core/components/RunAside.tsx @@ -1,5 +1,6 @@ +import Aside, {AsideSection} from '~/components/Aside'; import React, {FunctionComponent, useCallback, useMemo, useState} from 'react'; -import {borderColor, ellipsis, em, rem, size} from '~/utils/style'; +import {ellipsis, em, rem, size} from '~/utils/style'; import Checkbox from '~/components/Checkbox'; import Field from '~/components/Field'; @@ -10,73 +11,53 @@ import styled from 'styled-components'; import uniqBy from 'lodash/uniqBy'; import {useTranslation} from '~/utils/i18n'; -const Aside = styled.div` - height: 100%; - overflow: hidden; - display: flex; - flex-direction: column; +const StyledAside = styled(Aside)` + ${AsideSection}.run-section { + flex: auto; + overflow-x: hidden; + overflow-y: auto; + display: flex; + flex-direction: column; - > section { - margin: ${rem(20)} ${rem(20)} 0; - flex: 0 0 auto; - - &:not(:last-child) { - border-bottom: 1px solid ${borderColor}; - padding-bottom: ${rem(20)}; - } - - &.run-section { - flex: 1 1 auto; + .run-select { + flex: auto; overflow-x: hidden; overflow-y: auto; display: flex; flex-direction: column; - .running-toggle { - flex: 0 0 auto; - box-shadow: 0 -${rem(5)} ${rem(16)} 0 rgba(0, 0, 0, 0.03); + > * { + flex: none; } - .run-select { - flex: 1 1 auto; + .search-input { + margin-bottom: ${rem(15)}; + } + + .run-list { + flex: auto; overflow-x: hidden; overflow-y: auto; - display: flex; - flex-direction: column; - > * { - flex: 0 0 auto; - } + margin-top: ${rem(5)}; - .search-input { - margin-bottom: ${rem(15)}; - } + > div { + margin-top: ${rem(11)}; - .run-list { - flex: 1 1 auto; - overflow-x: hidden; - overflow-y: auto; - - margin-top: ${rem(5)}; - - > div { - margin-top: ${rem(11)}; + > * { + width: 100%; + } - > * { - width: 100%; - } + 
.run-item { + display: flex; + align-items: center; + ${ellipsis()} - .run-item { - display: flex; - align-items: center; - ${ellipsis()} - - > i { - display: inline-block; - ${size(em(12), em(12))}; - border-radius: ${em(6)}; - margin-right: ${em(8)}; - } + > i { + display: inline-block; + ${size(em(12), em(12))}; + border-radius: ${em(6)}; + margin-right: ${em(8)}; } } } @@ -131,10 +112,15 @@ const RunAside: FunctionComponent = ({ [onChangeRuns, selectedRuns] ); + const bottom = useMemo( + () => , + [running, onToggleRunning] + ); + return ( -
- -
- + + ); }; diff --git a/frontend/packages/core/components/RunningToggle.tsx b/frontend/packages/core/components/RunningToggle.tsx index 382f9984..665a43b1 100644 --- a/frontend/packages/core/components/RunningToggle.tsx +++ b/frontend/packages/core/components/RunningToggle.tsx @@ -8,7 +8,6 @@ import styled from 'styled-components'; import {useTranslation} from '~/utils/i18n'; const Wrapper = styled.div` - padding: ${rem(20)} 0; display: flex; align-items: center; diff --git a/frontend/packages/core/components/SamplesPage/SampleChart.tsx b/frontend/packages/core/components/SamplesPage/SampleChart.tsx index e27b1455..85275bdf 100644 --- a/frontend/packages/core/components/SamplesPage/SampleChart.tsx +++ b/frontend/packages/core/components/SamplesPage/SampleChart.tsx @@ -1,6 +1,6 @@ import Image, {ImageRef} from '~/components/Image'; import React, {FunctionComponent, useCallback, useEffect, useMemo, useRef, useState} from 'react'; -import {ellipsis, em, primaryColor, size, textLightColor, transitionProps} from '~/utils/style'; +import {ellipsis, em, primaryColor, rem, size, textLightColor, transitionProps} from '~/utils/style'; import ChartToolbox from '~/components/ChartToolbox'; import GridLoader from 'react-spinners/GridLoader'; @@ -69,6 +69,10 @@ const Container = styled.div<{brightness?: number; contrast?: number; fit?: bool } `; +const Toolbox = styled(ChartToolbox)` + margin-bottom: ${rem(18)}; +`; + type ImageData = { step: number; wallTime: number; @@ -195,7 +199,7 @@ const SampleChart: FunctionComponent = ({run, tag, brightness, {Content} - = ({data, loading, gl, className}) => { - const {ref, echart} = useECharts({ + const {ref, echart, wrapper} = useECharts({ loading, - gl + gl, + autoFit: true }); const chartOptions = useMemo( @@ -84,13 +85,12 @@ const ScatterChart: FunctionComponent = ({data, useEffect(() => { if (process.browser) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - echart?.current?.setOption(chartOptions as any, 
{notMerge: true}); + echart?.setOption(chartOptions); } }, [chartOptions, echart]); return ( - + {!echart && (
diff --git a/frontend/packages/core/components/SearchInput.tsx b/frontend/packages/core/components/SearchInput.tsx index 8a0ba408..00a54737 100644 --- a/frontend/packages/core/components/SearchInput.tsx +++ b/frontend/packages/core/components/SearchInput.tsx @@ -1,14 +1,16 @@ import Input, {InputProps, padding} from '~/components/Input'; -import React, {FunctionComponent} from 'react'; -import {WithStyled, em, math, position, textLighterColor} from '~/utils/style'; +import React, {FunctionComponent, useCallback, useRef} from 'react'; +import {WithStyled, math, position, textColor, textLightColor, textLighterColor} from '~/utils/style'; import Icon from '~/components/Icon'; import styled from 'styled-components'; -const iconSize = em(16); +const searchIconSize = '1.142857143'; +const closeIconSize = '0.857142857'; const StyledInput = styled(Input)` - padding-left: ${math(`${iconSize} + ${padding} * 2`)}; + padding-left: ${math(`1em * ${searchIconSize} + ${padding} * 2`)}; + padding-right: ${math(`1em * ${closeIconSize} + ${padding} * 2`)}; width: 100%; `; @@ -17,18 +19,45 @@ const Control = styled.div` `; const SearchIcon = styled(Icon)` - font-size: ${iconSize}; display: block; - ${position('absolute', padding, null, null, padding)} + transform: translateY(-50%) scale(${searchIconSize}); + transform-origin: center; + ${position('absolute', '50%', null, null, padding)} pointer-events: none; color: ${textLighterColor}; `; -const SearchInput: FunctionComponent = ({className, ...props}) => ( - - - - -); +const CloseIcon = styled(Icon)` + display: block; + transform: translateY(-50%) scale(${closeIconSize}); + transform-origin: center; + ${position('absolute', '50%', padding, null, null)} + cursor: pointer; + color: ${textLighterColor}; + + &:hover { + color: ${textLightColor}; + } + + &:active { + color: ${textColor}; + } +`; + +const SearchInput: FunctionComponent = ({className, value, onChange, ...props}) => { + const input = useRef(null); + const clear = 
useCallback(() => { + onChange?.(''); + input.current?.focus(); + }, [onChange]); + + return ( + + + + {!!value && } + + ); +}; export default SearchInput; diff --git a/frontend/packages/core/components/Select.tsx b/frontend/packages/core/components/Select.tsx index 2dd0c5bc..fa4ddfa7 100644 --- a/frontend/packages/core/components/Select.tsx +++ b/frontend/packages/core/components/Select.tsx @@ -195,7 +195,7 @@ const Select = ({ [value, onChange] ); - const ref = useClickOutside(setIsOpenedFalse); + const ref = useClickOutside(setIsOpenedFalse); const list = useMemo[]>( () => diff --git a/frontend/packages/core/hooks/useClickOutside.ts b/frontend/packages/core/hooks/useClickOutside.ts index bd903dcb..63b48af6 100644 --- a/frontend/packages/core/hooks/useClickOutside.ts +++ b/frontend/packages/core/hooks/useClickOutside.ts @@ -1,7 +1,7 @@ import {useCallback, useEffect, useRef} from 'react'; -const useClickOutside = (callback: () => void) => { - const ref = useRef(null); +const useClickOutside = (callback: () => void) => { + const ref = useRef(null); const escapeListener = useCallback( (e: KeyboardEvent) => { diff --git a/frontend/packages/core/hooks/useECharts.ts b/frontend/packages/core/hooks/useECharts.ts index 6221b6bd..f4517e25 100644 --- a/frontend/packages/core/hooks/useECharts.ts +++ b/frontend/packages/core/hooks/useECharts.ts @@ -1,19 +1,21 @@ -import {MutableRefObject, useCallback, useEffect, useRef, useState} from 'react'; +import {MutableRefObject, useCallback, useEffect, useLayoutEffect, useRef, useState} from 'react'; import {maskColor, primaryColor, textColor} from '~/utils/style'; import {ECharts} from 'echarts'; -const useECharts = (options: { +const useECharts = (options: { loading?: boolean; gl?: boolean; zoom?: boolean; + autoFit?: boolean; }): { ref: MutableRefObject; - echart: MutableRefObject | null; + wrapper: MutableRefObject; + echart: ECharts | null; } => { const ref = useRef(null); const echartInstance = useRef(null); - const [echart, 
setEchart] = useState(null); + const [echart, setEchart] = useState(null); const createChart = useCallback(() => { (async () => { @@ -34,7 +36,7 @@ const useECharts = (options: { }); }, 0); } - setEchart(echartInstance); + setEchart(echartInstance.current); })(); }, [options.gl, options.zoom]); @@ -46,12 +48,12 @@ const useECharts = (options: { useEffect(() => { if (process.browser) { createChart(); - return () => destroyChart(); + return destroyChart; } }, [createChart, destroyChart]); useEffect(() => { - if (process.browser && echart) { + if (process.browser) { if (options.loading) { echartInstance.current?.showLoading('default', { text: '', @@ -64,9 +66,23 @@ const useECharts = (options: { echartInstance.current?.hideLoading(); } } - }, [options.loading, echart]); + }, [options.loading]); + + const wrapper = useRef(null); + useLayoutEffect(() => { + if (options.autoFit && process.browser) { + const w = wrapper.current; + if (w) { + const observer = new ResizeObserver(() => { + echartInstance.current?.resize(); + }); + observer.observe(w); + return () => observer.unobserve(w); + } + } + }, [options.autoFit]); - return {ref, echart}; + return {ref, echart, wrapper}; }; export default useECharts; diff --git a/frontend/packages/core/hooks/useNavItems.ts b/frontend/packages/core/hooks/useNavItems.ts index 9050e958..85a13611 100644 --- a/frontend/packages/core/hooks/useNavItems.ts +++ b/frontend/packages/core/hooks/useNavItems.ts @@ -5,10 +5,11 @@ import {fetcher} from '~/utils/fetch'; import intersection from 'lodash/intersection'; import useRequest from '~/hooks/useRequest'; -const allNavItems = ['scalars', 'samples', 'high-dimensional']; +const allNavItems = ['scalars', 'samples', 'graphs', 'high-dimensional']; export const navMap = { scalar: 'scalars', image: 'samples', + graph: 'graphs', embeddings: 'high-dimensional' } as const; diff --git a/frontend/packages/core/next.config.js b/frontend/packages/core/next.config.js index ab28d1dc..fcb0f385 100644 --- 
a/frontend/packages/core/next.config.js +++ b/frontend/packages/core/next.config.js @@ -52,7 +52,7 @@ module.exports = { config.resolve.alias['~'] = path.resolve(__dirname); config.node = Object.assign({}, config.node, { - // eslint-disable-next-line @typescript-eslint/camelcase + // eslint-disable-next-line @typescript-eslint/naming-convention child_process: 'empty', fs: 'empty' }); diff --git a/frontend/packages/core/package.json b/frontend/packages/core/package.json index 9775b328..60bfeb8b 100644 --- a/frontend/packages/core/package.json +++ b/frontend/packages/core/package.json @@ -35,19 +35,18 @@ "@visualdl/i18n": "2.0.0-beta.43", "@visualdl/wasm": "2.0.0-beta.43", "bignumber.js": "9.0.0", - "dagre-d3": "0.6.4", - "echarts": "4.7.0", + "echarts": "4.8.0", "echarts-gl": "1.1.1", - "eventemitter3": "4.0.0", + "eventemitter3": "4.0.4", "file-saver": "2.0.2", "isomorphic-unfetch": "3.0.0", "lodash": "4.17.15", "mime-types": "2.1.27", - "moment": "2.25.3", - "nanoid": "3.1.5", - "next": "9.3.6", + "moment": "2.26.0", + "nanoid": "3.1.9", + "next": "9.4.4", "nprogress": "0.2.0", - "polished": "3.6.2", + "polished": "3.6.4", "prop-types": "15.7.2", "query-string": "6.12.1", "react": "16.13.1", @@ -58,21 +57,19 @@ "react-spinners": "0.8.3", "react-tooltip": "4.2.6", "save-svg-as-png": "1.4.17", - "styled-components": "5.1.0", - "swr": "0.2.0" + "styled-components": "5.1.1", + "swr": "0.2.2" }, "devDependencies": { - "@babel/core": "7.9.6", - "@types/d3": "5.7.2", - "@types/dagre-d3": "0.4.39", - "@types/echarts": "4.6.0", + "@babel/core": "7.10.2", + "@types/echarts": "4.6.1", "@types/file-saver": "2.0.1", - "@types/lodash": "4.14.150", + "@types/lodash": "4.14.155", "@types/mime-types": "2.1.0", - "@types/node": "13.13.5", + "@types/node": "14.0.10", "@types/nprogress": "0.2.0", - "@types/react": "16.9.34", - "@types/react-dom": "16.9.7", + "@types/react": "16.9.35", + "@types/react-dom": "16.9.8", "@types/styled-components": "5.1.0", "@visualdl/mock": 
"2.0.0-beta.43", "babel-plugin-emotion": "10.0.33", @@ -82,7 +79,7 @@ "cross-env": "7.0.2", "css-loader": "3.5.3", "ora": "4.0.4", - "typescript": "3.8.3", + "typescript": "3.9.3", "worker-plugin": "4.0.3" }, "engines": { diff --git a/frontend/packages/core/pages/_document.tsx b/frontend/packages/core/pages/_document.tsx index 8592a12b..abb604a4 100644 --- a/frontend/packages/core/pages/_document.tsx +++ b/frontend/packages/core/pages/_document.tsx @@ -1,4 +1,12 @@ -import Document, {DocumentContext, DocumentProps, Head, Html, Main, NextScript} from 'next/document'; +import Document, { + DocumentContext, + DocumentInitialProps, + DocumentProps, + Head, + Html, + Main, + NextScript +} from 'next/document'; import {ServerStyleSheet} from '~/utils/style'; @@ -8,7 +16,7 @@ interface VDLDocumentProps extends DocumentProps { } export default class VDLDocument extends Document { - static async getInitialProps(ctx: DocumentContext) { + static async getInitialProps(ctx: DocumentContext): Promise { // https://github.com/zeit/next.js/blob/canary/examples/with-typescript-styled-components/pages/_document.tsx const sheet = new ServerStyleSheet(); const originalRenderPage = ctx.renderPage; @@ -42,7 +50,7 @@ export default class VDLDocument extends Document { } } - render() { + render(): JSX.Element { const {language, languageDir} = this.props; return ( diff --git a/frontend/packages/core/pages/graphs.tsx b/frontend/packages/core/pages/graphs.tsx index ec96a5b3..88fa0966 100644 --- a/frontend/packages/core/pages/graphs.tsx +++ b/frontend/packages/core/pages/graphs.tsx @@ -1,313 +1,228 @@ -import {Graph, NodeType, TypedNode, collectDagFacts} from '~/resource/graphs'; +import Aside, {AsideSection} from '~/components/Aside'; +import {Documentation, Properties, SearchItem, SearchResult} from '~/resource/graphs/types'; +import Graph, {GraphRef} from '~/components/GraphsPage/Graph'; import {NextI18NextPage, useTranslation} from '~/utils/i18n'; -import NodeInfo, {NodeInfoProps} from 
'~/components/GraphsPage/NodeInfo'; -import React, {useEffect, useMemo, useState} from 'react'; +import React, {useCallback, useEffect, useMemo, useRef, useState} from 'react'; +import Button from '~/components/Button'; +import Checkbox from '~/components/Checkbox'; import Content from '~/components/Content'; import Field from '~/components/Field'; -import Preloader from '~/components/Preloader'; -import RawButton from '~/components/Button'; -import RawRangeSlider from '~/components/RangeSlider'; +import ModelPropertiesDialog from '~/components/GraphsPage/ModelPropertiesDialog'; +import NodeDocumentationSidebar from '~/components/GraphsPage/NodeDocumentationSidebar'; +import NodePropertiesSidebar from '~/components/GraphsPage/NodePropertiesSidebar'; +import Search from '~/components/GraphsPage/Search'; import Title from '~/components/Title'; -import isEmpty from 'lodash/isEmpty'; +import Uploader from '~/components/GraphsPage/Uploader'; import {rem} from '~/utils/style'; -import {saveSvgAsPng} from 'save-svg-as-png'; import styled from 'styled-components'; -import useRequest from '~/hooks/useRequest'; -// eslint-disable-next-line @typescript-eslint/no-empty-function -const dumbFn = () => {}; - -const AsideSection = styled.section` - padding: ${rem(20)}; -`; - -const SubSection = styled.div` - margin-bottom: ${rem(30)}; -`; -const Button = styled(RawButton)` +const FullWidthButton = styled(Button)` width: 100%; - text-transform: uppercase; - - & + & { - margin-top: ${rem(20)}; - } `; -const Empty = styled.div` +const ExportButtonWrapper = styled.div` display: flex; - justify-content: center; - align-items: center; - font-size: ${rem(20)}; - height: ${rem(150)}; -`; + justify-content: space-between; -const RangeSlider = styled(RawRangeSlider)` - width: 100%; -`; - -const GraphSvg = styled('svg')` - width: 100%; + > * { + flex: 1 1 auto; - cursor: grab; - &.grabbing { - cursor: grabbing; - } - - .node { - cursor: pointer; - - .label-container { - stroke-width: 3px; - 
stroke: #e6e6e6; - &.rect { - rx: 10; - ry: 10; - } - } - - &.operator { - .label-container { - fill: #cdd9da; - } - } - - &.output { - .label-container { - stroke-dasharray: 5, 5; - stroke: #e6e6e6; - fill: #cad2d0; - } - } - - &.input { - .label-container { - fill: #d5d3d8; - } - } - - &.active { - .label-container { - stroke: #25c9ff; - } + &:not(:last-child) { + margin-right: ${rem(20)}; } } - - .edgePath path.path { - stroke: #333; - stroke-width: 1.5px; - } `; -const loadDagLibs = [import('d3'), import('dagre-d3')] as const; -const MIN_SCALE = 0.1; -const MAX_SCALE = 4; +// TODO: better way to auto fit height +const SearchSection = styled(AsideSection)` + max-height: calc(100% - ${rem(40)}); + display: flex; + flex-direction: column; -const useDag = (graph?: Graph) => { - const [displaySwitch, setDisplaySwitch] = useState({ - detail: false, - input: false, - output: false - }); - const facts = useMemo(() => collectDagFacts(graph), [graph]); + &:not(:last-child) { + padding-bottom: 0; + } +`; - const dagInfo = useMemo(() => { - const {inputLayer, outputLayer, briefLayer, detailLayer, findNode} = facts; +const Graphs: NextI18NextPage = () => { + const {t} = useTranslation(['graphs', 'common']); - const availableLayers = displaySwitch.detail ? 
[detailLayer] : [briefLayer]; - if (displaySwitch.input) { - availableLayers.push(inputLayer); + const graph = useRef(null); + const file = useRef(null); + const [files, setFiles] = useState(null); + const onClickFile = useCallback(() => { + if (file.current) { + file.current.value = ''; + file.current.click(); } - if (displaySwitch.output) { - availableLayers.push(outputLayer); + }, []); + const onChangeFile = useCallback((e: React.ChangeEvent) => { + const target = e.target; + if (target && target.files && target.files.length) { + setFiles(target.files); } - - return { - ...availableLayers.reduce( - (memo, {nodes, edges}) => ({ - nodes: memo.nodes.concat(nodes), - edges: memo.edges.concat(edges) - }), - { - nodes: [], - edges: [] - } + }, []); + + const [search, setSearch] = useState(''); + const [searching, setSearching] = useState(false); + const [searchResult, setSearchResult] = useState({text: '', result: []}); + const onSearch = useCallback((value: string) => { + setSearch(value); + graph.current?.search(value); + }, []); + const onSelect = useCallback((item: SearchItem) => { + setSearch(item.name); + graph.current?.select(item); + }, []); + + const [showAttributes, setShowAttributes] = useState(false); + const [showInitializers, setShowInitializers] = useState(true); + const [showNames, setShowNames] = useState(false); + + const [modelData, setModelData] = useState(null); + const [nodeData, setNodeData] = useState(null); + const [nodeDocumentation, setNodeDocumentation] = useState(null); + + useEffect(() => setSearch(''), [showAttributes, showInitializers, showNames]); + + const bottom = useMemo( + () => + searching ? 
null : ( + + {t('graphs:change-model')} + ), - findNode - }; - }, [facts, displaySwitch]); - - return { - dagInfo, - displaySwitch, - setDisplaySwitch - }; -}; - -const useDagreD3 = (graph?: Graph) => { - const [currentNode, setCurrentNode] = useState(undefined); - const {dagInfo, displaySwitch, setDisplaySwitch} = useDag(graph); - const [downloadImage, setDownloadImageFn] = useState<() => void>(() => dumbFn); - const [fitScreen, setFitScreenFn] = useState<() => void>(() => dumbFn); - const [scale, setScaleValue] = useState(1); - const [setScale, setScaleFn] = useState<(n: number) => void>(() => dumbFn); - - useEffect(() => { - Promise.all(loadDagLibs).then(([d3, {default: dagre}]) => { - if (!dagInfo.nodes.length || !dagInfo.edges.length) { - return; - } - - const g = new dagre.graphlib.Graph<{type: NodeType; elem: HTMLElement}>(); - g.setGraph({}).setDefaultEdgeLabel(() => ({})); - - dagInfo.nodes.forEach(n => g.setNode(n.key, n)); - dagInfo.edges.forEach(e => g.setEdge(e[0], e[1])); - - const render = new dagre.render(); - const svg = d3.select('svg'); // eslint-disable-line @typescript-eslint/no-explicit-any - const inner = svg.select('svg g'); - render(inner, g); - - const {width, height} = g.graph(); - const scaleFactor = 1; - svg.attr('height', Math.max(640, window.innerHeight + 40)); - - const zoom = d3 - .zoom() // eslint-disable-line @typescript-eslint/no-explicit-any - .scaleExtent([MIN_SCALE, MAX_SCALE]) - .on('zoom', function () { - setScaleValue(d3.event.transform.k / scaleFactor); - inner.attr('transform', d3.event.transform); - }) - .on('start', () => svg.classed('grabbing', true)) - .on('end', () => svg.classed('grabbing', false)); - svg.call(zoom); - - let prevDom: HTMLElement | undefined; - // install event listeners - svg.selectAll('g.node').on('click', v => { - const uid = v as string; - const {type, elem: dom} = g.node(uid); - if (prevDom) { - prevDom.classList.remove('active'); - } - dom.classList.add('active'); - prevDom = dom; - const node 
= dagInfo.findNode(type, uid); - if (!node) { - setCurrentNode({type: 'unknown', guessType: type, msg: uid}); - return; - } - - setCurrentNode({...node, type} as TypedNode); - }); - - const fitScreen = () => { - if (!svg) { - return; - } - - const parent = svg.node()?.parentElement; - if (!parent) { - return; - } - - const {width: parentWidth} = parent.getBoundingClientRect(); - svg.call( - zoom.transform, - d3.zoomIdentity.translate((parentWidth - (width ?? 0) * scaleFactor) / 2, 20).scale(scaleFactor) - ); - }; - fitScreen(); - - setFitScreenFn(() => fitScreen); - - setDownloadImageFn(() => { - let processing = false; - return async () => { - if (processing) { - return; - } - - processing = true; - fitScreen(); - const svgNode = svg.node(); - if (!svgNode) { - return; - } - const originalHeight = +svg.attr('height'); - svg.attr('height', (height ?? 0) + 40); - await saveSvgAsPng(svgNode, 'graph.png'); - svg.attr('height', originalHeight); - processing = false; - }; - }); - - setScaleFn(() => (n: number) => { - zoom.scaleTo(svg, scaleFactor * n); - setScaleValue(n); - }); - }); - }, [dagInfo]); - - return {currentNode, displaySwitch, setDisplaySwitch, downloadImage, fitScreen, scale, setScale}; -}; - -const Graphs: NextI18NextPage = () => { - const {t} = useTranslation(['graphs', 'common']); - const {data, error, loading} = useRequest<{data: Graph}>('/graphs/graph'); - const graph = useMemo(() => (loading || isEmpty(data?.data) ? 
undefined : data?.data), [loading, data]); - const {currentNode, downloadImage, fitScreen, scale, setScale} = useDagreD3(graph); - - const aside = ( - - - - - - - - - - - - - - - - - + [t, onClickFile, searching] ); - const ContentInner = useMemo(() => { - if (loading) { + const [rendered, setRendered] = useState(false); + + const aside = useMemo(() => { + if (!rendered) { return null; } - if (error) { - return {t('common:error')}; + if (nodeDocumentation) { + return ( + + ); } - if (!graph) { - return {t('common:empty')}; + if (nodeData) { + return ( + + ); } return ( - - - + ); - }, [loading, error, graph, t]); + }, [ + t, + bottom, + search, + searching, + searchResult, + onSearch, + onSelect, + showAttributes, + showInitializers, + showNames, + rendered, + nodeData, + nodeDocumentation + ]); + + const uploader = useMemo(() => , [onClickFile]); return ( <> - {t('common:graphs')} - - - {ContentInner} + setModelData(null)} /> + + setRendered(true)} + onSearch={data => setSearchResult(data)} + onShowModelProperties={data => setModelData(data)} + onShowNodeProperties={data => { + setNodeData(data); + setNodeDocumentation(null); + }} + onShowNodeDocumentation={data => setNodeDocumentation(data)} + /> + ); diff --git a/frontend/packages/core/pages/high-dimensional.tsx b/frontend/packages/core/pages/high-dimensional.tsx index e0f73f0e..3adb0252 100644 --- a/frontend/packages/core/pages/high-dimensional.tsx +++ b/frontend/packages/core/pages/high-dimensional.tsx @@ -1,10 +1,10 @@ +import Aside, {AsideSection} from '~/components/Aside'; import {Dimension, Reduction} from '~/resource/high-dimensional'; import {NextI18NextPage, useTranslation} from '~/utils/i18n'; import React, {useEffect, useMemo, useState} from 'react'; import Select, {SelectProps} from '~/components/Select'; import {em, rem} from '~/utils/style'; -import AsideDivider from '~/components/AsideDivider'; import Checkbox from '~/components/Checkbox'; import Content from '~/components/Content'; import Field 
from '~/components/Field'; @@ -24,10 +24,6 @@ import useSearchValue from '~/hooks/useSearchValue'; const dimensions = ['2d', '3d']; const reductions = ['pca', 'tsne']; -const AsideSection = styled.section` - padding: ${rem(20)}; -`; - const StyledSelect = styled>>(Select)` min-width: ${em(160)}; `; @@ -96,49 +92,58 @@ const HighDimensional: NextI18NextPage = () => { const [reduction, setReduction] = useState(reductions[0] as Reduction); const [labelVisibility, setLabelVisibility] = useState(true); - const aside = ( - - {t('common:select-runs')} - - - - - - - - {t('high-dimensional:display-all-label')} - - - - - - {t('high-dimensional:dimension')} - - - setDimension(value)}> - {dimensions.map(item => ( - - {t(item)} - - ))} - - - - - - {t('high-dimensional:reduction-method')} - - - setReduction(value)}> - {reductions.map(item => ( - - {t(item)} - - ))} - - - - + const bottom = useMemo(() => , [running, setRunning]); + + const aside = useMemo( + () => ( + + ), + [t, bottom, dimension, label, labelList, labelVisibility, reduction, search] ); return ( diff --git a/frontend/packages/core/pages/samples.tsx b/frontend/packages/core/pages/samples.tsx index 6548bf3c..8e6da2df 100644 --- a/frontend/packages/core/pages/samples.tsx +++ b/frontend/packages/core/pages/samples.tsx @@ -4,6 +4,7 @@ import ChartPage, {WithChart} from '~/components/ChartPage'; import {NextI18NextPage, useTranslation} from '~/utils/i18n'; import React, {useCallback, useMemo, useState} from 'react'; +import {AsideSection} from '~/components/Aside'; import Checkbox from '~/components/Checkbox'; import Content from '~/components/Content'; import Field from '~/components/Field'; @@ -47,30 +48,33 @@ const Samples: NextI18NextPage = () => { const [brightness, setBrightness] = useState(1); const [contrast, setContrast] = useState(1); - const aside = ( - -
- - {t('samples:show-actual-size')} - -
-
- - - -
-
- - - -
-
+ const aside = useMemo( + () => ( + + + + {t('samples:show-actual-size')} + + + + + + + + + + + + + + ), + [t, brightness, contrast, onChangeRuns, running, runs, selectedRuns, showActualSize] ); const withChart = useCallback>( diff --git a/frontend/packages/core/pages/scalars.tsx b/frontend/packages/core/pages/scalars.tsx index 50143185..54e25f1c 100644 --- a/frontend/packages/core/pages/scalars.tsx +++ b/frontend/packages/core/pages/scalars.tsx @@ -1,8 +1,9 @@ import ChartPage, {WithChart} from '~/components/ChartPage'; import {NextI18NextPage, useTranslation} from '~/utils/i18n'; -import React, {useCallback, useState} from 'react'; +import React, {useCallback, useMemo, useState} from 'react'; import {sortingMethodMap, xAxisMap} from '~/resource/scalars'; +import {AsideSection} from '~/components/Aside'; import Checkbox from '~/components/Checkbox'; import Content from '~/components/Content'; import Field from '~/components/Field'; @@ -51,44 +52,50 @@ const Scalars: NextI18NextPage = () => { const [ignoreOutliers, setIgnoreOutliers] = useState(false); - const aside = ( - -
- - {t('scalars:ignore-outliers')} - - - {t('scalars:tooltip-sorting')} - ({ + label: t(`tooltip-sorting-value.${value}`), + value + }))} + value={tooltipSorting} + onChange={setTooltipSorting} + /> + + + + + + + + + + + {xAxisValues.map(value => ( + + {t(`x-axis-value.${value}`)} + + ))} + + + + + ), + [t, ignoreOutliers, onChangeRuns, running, runs, selectedRuns, smoothing, tooltipSorting, xAxis] ); const withChart = useCallback>( diff --git a/frontend/packages/core/public/locales/en/common.json b/frontend/packages/core/public/locales/en/common.json index e1c6c9c3..c6708fbd 100644 --- a/frontend/packages/core/public/locales/en/common.json +++ b/frontend/packages/core/public/locales/en/common.json @@ -1,4 +1,6 @@ { + "cancel": "Cancel", + "close": "Close", "confirm": "Confirm", "empty": "Nothing to display", "error": "Error occurred", @@ -12,19 +14,19 @@ "runs": "Runs", "samples": "Samples", "scalars": "Scalars", - "search": "Search", "search-empty": "Nothing found. Please try again with another word. <1/>Or you can <3>see all charts.", "search-result": "Search Result", "search-runs": "Search runs", "search-tags": "Search tags in RegExp", - "select": "Please Select", + "search": "Search", "select-all": "Select All", "select-runs": "Select Runs", + "select": "Please Select", "start-realtime-refresh": "Start realtime refresh", - "stop": "Stop", "stop-realtime-refresh": "Stop realtime refresh", + "stop": "Stop", "stopped": "Stopped", - "total-page": "{{count}} page, jump to", "total-page_plural": "{{count}} pages, jump to", + "total-page": "{{count}} page, jump to", "unselected-empty": "Nothing selected. <1/>Please select display data from right side." 
} diff --git a/frontend/packages/core/public/locales/en/graphs.json b/frontend/packages/core/public/locales/en/graphs.json index 1f362d04..a9c71c9b 100644 --- a/frontend/packages/core/public/locales/en/graphs.json +++ b/frontend/packages/core/public/locales/en/graphs.json @@ -1,14 +1,52 @@ { - "click-node": "Click a node to view its detail", - "download-image": "Download Image", - "input": "Input", - "node-data-shape": "Shape", - "node-data-type": "Data Type", - "node-info": "Node Info", - "node-name": "Node Name", - "node-type": "Node Type", - "op-type": "Operator Type", - "output": "Output", - "restore-image": "Restore Image", - "scale": "Scale" + "change-model": "Change Model", + "model-properties": "Model Properties", + "node-properties": "Node Properties", + "node-documentation": "Documentation", + "nothing-matched": "Nothing matched", + "display-data": "Select Display Data", + "show-attributes": "Show Attributes", + "show-initializers": "Show Initializers", + "show-node-names": "Show Node Names", + "export-file": "Export File", + "export-png": "PNG", + "export-svg": "SVG", + "upload-tip": "Click or Drop file here to view neural network models", + "upload-model": "Upload Model", + "supported-model": "Supported models: ", + "experimental-supported-model": "Experimental supported models: ", + "supported-model-list": "PaddlePaddle, ONNX, Keras, Core ML, Caffe, Caffe2, Darknet, MXNet, ncnn, TensorFlow Lite", + "experimental-supported-model-list": "TorchScript, PyTorch, Torch, ArmNN, BigDL, Chainer, CNTK, Deeplearning4j, MediaPipe, ML.NET, MNN, OpenVINO, Scikit-learn, Tengine, TensorFlow.js, TensorFlow", + "properties": { + "format": "Format", + "producer": "Producer", + "source": "Source", + "name": "Name", + "version": "Version", + "description": "Description", + "author": "Author", + "company": "Company", + "license": "License", + "domain": "Domain", + "imports": "Imports", + "runtime": "Runtime", + "type": "Type", + "tags": "Tags", + "inputs": "Inputs", + 
"outputs": "Outputs", + "attributes": "Attributes" + }, + "documentation": { + "attributes": "Attributes", + "inputs": "Inputs", + "outputs": "Outputs", + "type-constraints": "Type Constraints", + "examples": "Examples", + "references": "References", + "support": "Support", + "support-info": "In domain <1>{{domain}} since version <3>{{since_version}} at support level <5>{{support_level}}." + }, + "restore-size": "Restore Size", + "zoom-in": "Zoom In", + "zoom-out": "Zoom Out" } diff --git a/frontend/packages/core/public/locales/zh/common.json b/frontend/packages/core/public/locales/zh/common.json index 6f235c51..d436f887 100644 --- a/frontend/packages/core/public/locales/zh/common.json +++ b/frontend/packages/core/public/locales/zh/common.json @@ -1,4 +1,6 @@ { + "cancel": "取消", + "close": "关闭", "confirm": "确定", "empty": "暂无数据", "error": "发生错误", @@ -12,19 +14,19 @@ "runs": "数据流", "samples": "样本数据", "scalars": "标量数据", - "search": "搜索", "search-empty": "没有找到您期望的内容,你可以尝试其他搜索词<1/>或者点击<3>查看全部图表", "search-result": "搜索结果", "search-runs": "搜索数据流", "search-tags": "搜索标签(支持正则)", - "select": "请选择", + "search": "搜索", "select-all": "全选", "select-runs": "选择数据流", + "select": "请选择", "start-realtime-refresh": "运行实时数据刷新", - "stop": "停止", "stop-realtime-refresh": "停止实时数据刷新", + "stop": "停止", "stopped": "已停止", - "total-page": "共 {{count}} 页,跳转至", "total-page_plural": "共 {{count}} 页,跳转至", + "total-page": "共 {{count}} 页,跳转至", "unselected-empty": "未选中任何数据<1/>请在右侧操作栏选择要展示的数据" } diff --git a/frontend/packages/core/public/locales/zh/graphs.json b/frontend/packages/core/public/locales/zh/graphs.json index 12493b89..96ca3618 100644 --- a/frontend/packages/core/public/locales/zh/graphs.json +++ b/frontend/packages/core/public/locales/zh/graphs.json @@ -1,14 +1,52 @@ { - "scale": "比例", - "download-image": "下载图片", - "restore-image": "还原图片", - "node-info": "节点信息", - "node-type": "节点类型", - "node-name": "节点名称", - "node-data-shape": "数据类型", - "input": "输入", - "output": "输出", - "op-type": "算子类型", - 
"node-data-type": "数据类型", - "click-node": "点击左侧节点,查看节点信息" + "change-model": "更换模型", + "model-properties": "模型属性", + "node-properties": "节点属性", + "node-documentation": "文档", + "nothing-matched": "无匹配的内容", + "display-data": "选择展示数据", + "show-attributes": "显示参数", + "show-initializers": "显示初始化参数", + "show-node-names": "显示节点名称", + "export-file": "导出文件", + "export-png": "PNG", + "export-svg": "SVG", + "upload-tip": "点击或拖拽文件到页面上传模型,进行结构展示", + "upload-model": "上传模型", + "supported-model": "VisualDL支持:", + "experimental-supported-model": "VisualDL实验性支持:", + "supported-model-list": "PaddlePaddle、ONNX、Keras、Core ML、Caffe、Caffe2、Darknet、MXNet、ncnn、TensorFlow Lite", + "experimental-supported-model-list": "TorchScript、PyTorch、Torch、 ArmNN、BigDL、Chainer、CNTK、Deeplearning4j、MediaPipe、ML.NET、MNN、OpenVINO、Scikit-learn、Tengine、TensorFlow.js、TensorFlow", + "properties": { + "format": "格式", + "producer": "框架", + "source": "源", + "name": "名称", + "version": "版本", + "description": "描述", + "author": "作者", + "company": "公司", + "license": "许可证", + "domain": "域名", + "imports": "导入", + "runtime": "运行时", + "type": "类型", + "tags": "标签", + "inputs": "输入", + "outputs": "输出", + "attributes": "属性" + }, + "documentation": { + "attributes": "属性", + "inputs": "输入", + "outputs": "输出", + "type-constraints": "类型约束", + "examples": "示例", + "references": "参考", + "support": "支持", + "support-info": "从 <3>{{since_version}} 版本起域名 <1>{{domain}} 的支持等级为 <5>{{support_level}}。" + }, + "restore-size": "重置大小", + "zoom-in": "放大", + "zoom-out": "缩小" } diff --git a/frontend/packages/core/public/netron/armnn-metadata.json b/frontend/packages/core/public/netron/armnn-metadata.json new file mode 100644 index 00000000..d69e0df2 --- /dev/null +++ b/frontend/packages/core/public/netron/armnn-metadata.json @@ -0,0 +1,476 @@ +[ + { + "name": "InputLayer", + "schema": { + "bindings": [ + { "name": "layerBindingId", "type": "int", "src": "layerBindingId" } + ] + } + }, + { + "name": "OutputLayer", + "schema": { + "category": 
"Tensor", + "bindings": [ + { "name": "layerBindingId", "type": "int", "src": "layerBindingId" } + ] + } + }, + { + "name": "Pooling2dLayer", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "type", "type": "string", "src": "poolType", "src_type": "PoolingAlgorithm"}, + { "name": "padding", "type": "string", "src": ["padTop", "padRight", "padBottom", "padLeft"] }, + { "name": "width", "type": "string", "src": "poolWidth" }, + { "name": "height", "type": "string", "src": "poolHeight" }, + { "name": "stride", "type": "string", "src": ["strideX", "strideY"] }, + { "name": "outputShapeRounding", "type": "string", "src": "outputShapeRounding", "src_type": "OutputShapeRounding"}, + { "name": "paddingMethod", "type": "string", "src": "paddingMethod", "src_type": "PaddingMethod"}, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "ReshapeLayer", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "targetShape", "type": "string", "src": "targetShape" } + ] + } + }, + { + "name": "SoftmaxLayer", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "beta", "type": "float", "src": "beta" } + ] + } + }, + { + "name": "Convolution2dLayer", + "schema": { + "category": "Layer", + "inputs": [ + + { "name": "weight", "src": "weights" }, + { "name": "bias", "src": "biases" } + ], + "attributes": [ + { "name": "padding", "type": "string", "src": ["padTop", "padRight", "padBottom", "padLeft"] }, + { "name": "stride", "type": "string", "src": ["strideX", "strideY"] }, + { "name": "dilation", "type": "string", "src": ["dilationX", "dilationY"] }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "DepthwiseConvolution2dLayer", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "weight", "src": "weights" }, + { "name": "bias", "src": "biases" } + ], + "attributes": [ + { "name": "padding", 
"type": "string", "src": ["padTop", "padRight", "padBottom", "padLeft"] }, + { "name": "stride", "type": "string", "src": ["strideX", "strideY"] }, + { "name": "dilation", "type": "string", "src": ["dilationX", "dilationY"] }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "ActivationLayer", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "function", "type": "string", "src": "activationFunction", "src_type": "ActivationFunction" }, + { "name": "a", "type": "float", "src": "a" }, + { "name": "b", "type": "float", "src": "b" } + ] + } + }, + { + "name": "PermuteLayer", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "dimMappings", "type": "string", "src": "dimMappings" } + ] + } + }, + { + "name": "FullyConnectedLayer", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "weights", "src": "weights" }, + { "name": "biases", "src": "biases" } + ], + "attributes": [ + { "name": "transposeWeightsMatrix", "type": "bool", "src": "transposeWeightsMatrix" } + ] + } + }, + { + "name": "ConstantLayer", + "schema": { + "category": "Constant", + "inputs": [ + { "name": "input", "src": "input" } + ] + } + }, + { + "name": "SpaceToBatchNdLayer", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "blockShape", "type": "string", "src": "blockShape" }, + { "name": "padList", "type": "string", "src": "padList" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "BatchToSpaceNdLayer", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "blockShape", "type": "string", "src": "blockShape" }, + { "name": "crops", "type": "string", "src": "crops" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "DivisionLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "MinimumLayer", + 
"schema": { + "category": "Layer" + } + }, + { + "name": "EqualLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "MaximumLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "NormalizationLayer", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "normChannelType", "type": "string", "src": "normChannelType", "src_type": "NormalizationAlgorithmChannel" }, + { "name": "normMethodType", "type": "string", "src": "normMethodType", "src_type": "NormalizationAlgorithmMethod" }, + { "name": "normSize", "type": "uint", "src": "normSize" }, + { "name": "alpha", "type": "float", "src": "alpha" }, + { "name": "beta", "type": "float", "src": "beta" }, + { "name": "k", "type": "float", "src": "k" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "PadLayer", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "padList", "type": "uint", "src": "padList" }, + { "name": "padValue", "type": "float", "src": "padValue" } + ] + } + }, + { + "name": "RsqrtLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "FloorLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "BatchNormalizationLayer", + "schema": { + "category": "Normalization", + "inputs": [ + { "name": "mean", "src": "mean" }, + { "name": "variance", "src": "variance" }, + { "name": "beta", "src": "beta" }, + { "name": "gamma", "src": "gamma" } + ], + "attributes": [ + { "name": "eps", "type": "float", "src": "eps" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "GreaterLayer", + "schema": { + "category": "Layer", + "attributes": [ + ] + } + }, + { + "name": "ResizeBilinearLayer", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "targetWidth", "type": "uint", "src": "targetWidth" }, + { "name": "targetHeight", "type": "uint", "src": "targetHeight" }, + { "name": 
"dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "SubtractionLayer", + "schema": { + } + }, + { + "name": "StridedSliceLayer", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "begin", "type": "int", "src": "begin" }, + { "name": "end", "type": "int", "src": "end" }, + { "name": "stride", "type": "int", "src": "stride" }, + { "name": "beginMask", "type": "int", "src": "beginMask" }, + { "name": "endMask", "type": "int", "src": "endMask" }, + { "name": "shrinkAxisMask", "type": "int", "src": "shrinkAxisMask" }, + { "name": "ellipsisMask", "type": "int", "src": "ellipsisMask" }, + { "name": "newAxisMask", "type": "int", "src": "newAxisMask" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "GatherLayer", + "schema": { + "category": "Tensor" + } + }, + { + "name": "MeanLayer", + "schema": { + "attributes": [ + { "name": "axis", "type": "uint", "src": "axis" }, + { "name": "keepDims", "type": "bool", "src": "keepDims" } + ] + } + }, + { + "name": "MergerLayer", + "schema": { + "category": "Tensor" + } + }, + { + "name": "L2NormalizationLayer", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "eps", "type": "float", "src": "eps" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "SplitterLayer", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "concatAxis", "type": "uint", "src": "concatAxis" }, + { "name": "numViews", "type": "uint", "src": "numViewes" }, + { "name": "numDimensions", "type": "uint", "src": "numDimensions" } + ] + } + }, + { + "name": "DetectionPostProcessLayer", + "schema": { + "category": "Custom", + "attributes": [ + { "name": "maxDetections", "type": "uint", "src": "maxDetections" }, + { "name": "maxClassesPerDetection", "type": "uint", "src": "maxClassesPerDetection" }, + { "name": 
"detectionsPerClass", "type": "uint", "src": "detectionsPerClass" }, + { "name": "nmsScoreThreshold", "type": "float", "src": "nmsScoreThreshold" }, + { "name": "numIouThreshold", "type": "float", "src": "nmsIouThreshold" }, + { "name": "numClasses", "type": "uint", "src": "numClasses" }, + { "name": "useRegularNms", "type": "bool", "src": "useRegularNms" }, + { "name": "scaleX", "type": "float", "src": "scaleX" }, + { "name": "scaleY", "type": "float", "src": "scaleY" }, + { "name": "scaleW", "type": "float", "src": "scaleW" }, + { "name": "scaleH", "type": "float", "src": "scaleH" } + ] + } + }, + { + "name": "LstmLayer", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "inputToForgetWeights1", "src": "inputToForgetWeights1" }, + { "name": "inputToCellWeights1", "src": "inputToCellWeights1" }, + { "name": "inputToOutputWeights1", "src": "inputToOutputWeights1" }, + { "name": "recurrentToForgetWeights1", "src": "recurrentToForgetWeights1" }, + { "name": "recurrentToCellWeights1", "src": "recurrentToCellWeights1" }, + { "name": "recurrentToOutputWeights1", "src": "recurrentToOutputWeights1" }, + { "name": "forgetGateBias1", "src": "forgetGateBias1" }, + { "name": "cellBias1", "src": "cellBias1" }, + { "name": "outputGateBias1", "src": "outputGateBias1" }, + { "name": "inputToInputWeights1", "src": "inputToInputWeights1" }, + { "name": "recurrentToInputWeights1", "src": "recurrentToInputWeights1" }, + { "name": "cellToInputWeights1", "src": "cellToInputWeights1" }, + { "name": "inputGateBias1", "src": "inputGateBias1" }, + { "name": "projectionWeights1", "src": "projectionWeights1" }, + { "name": "projectionBias1", "src": "projectionBias1" }, + { "name": "cellToForgetWeights1", "src": "cellToForgetWeights1" }, + { "name": "cellToOutputWeights1", "src": "cellToOutputWeights1" }, + { "name": "inputLayerNormWeights1", "src": "inputLayerNormWeights1" }, + { "name": "forgetLayerNormWeights1", "src": "forgetLayerNormWeights1" }, + { "name": 
"cellLayerNormWeights1", "src": "cellLayerNormWeights1" }, + { "name": "outputLayerNormWeights1", "src": "outputLayerNormWeights1" } + ], + "attributes": [ + { "name": "activationFunc", "type": "uint", "src": "activationFunc" }, + { "name": "clippingThresCell", "type": "float", "src": "clippingThresCell" }, + { "name": "clippingThresProj", "type": "float", "src": "clippingThresProj" }, + { "name": "cifgEnabled", "type": "bool", "src": "cifgEnabled" }, + { "name": "peepholeEnabled", "type": "bool", "src": "peepholeEnabled" }, + { "name": "projectionEnabled", "type": "bool", "src": "projectionEnabled" }, + { "name": "layerNormEnabled", "type": "bool", "src": "layerNormEnabled" } + ] + } + }, + { + "name": "QuantizeLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "DequantizeLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "MergeLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "SwitchLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "ConcatLayer", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "concatAxis", "type": "uint", "src": "concatAxis" }, + { "name": "numViews", "type": "uint", "src": "numViewes" }, + { "name": "numDimensions", "type": "uint", "src": "numDimensions" } + ] + } + }, + { + "name": "SpaceToDepthLayer", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "blockSize", "type": "uint", "src": "blockSize" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "PreluLayer", + "schema": { + "category": "Layer" + } + }, + { + "name": "TransposeConvolution2dLayer", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "weight", "src": "weights" }, + { "name": "bias", "src": "biases" } + ], + "attributes": [ + { "name": "padding", "type": "string", "src": ["padTop", "padRight", "padBottom", "padLeft"] }, + { "name": "stride", "type": "string", "src": ["strideX", "strideY"] }, + { 
"name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "ResizeLayer", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "targetWidth", "type": "uint", "src": "targetWidth" }, + { "name": "targetHeight", "type": "uint", "src": "targetHeight" }, + { "name": "method", "type": "string", "src": "method", "src_type": "ResizeMethod" }, + { "name": "dataLayout", "type": "string", "src": "dataLayout", "src_type": "DataLayout" } + ] + } + }, + { + "name": "StackLayer", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "uint", "src": "axis" }, + { "name": "numInputs", "type": "uint", "src": "numInputs" }, + { "name": "inputShape", "type": "uint", "src": "inputShape" } + ] + } + }, + { + "name": "QuantizedLstmLayer", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "inputToInputWeights1", "src": "inputToInputWeights1" }, + { "name": "inputToForgetWeights1", "src": "inputToForgetWeights1" }, + { "name": "inputToCellWeights1", "src": "inputToCellWeights1" }, + { "name": "inputToOutputWeights1", "src": "inputToOutputWeights1" }, + { "name": "recurrentToInputWeights1", "src": "recurrentToInputWeights1" }, + { "name": "recurrentToForgetWeights1", "src": "recurrentToForgetWeights1" }, + { "name": "recurrentToCellWeights1", "src": "recurrentToCellWeights1" }, + { "name": "recurrentToOutputWeights1", "src": "recurrentToOutputWeights1" }, + { "name": "inputGateBias1", "src": "inputGateBias1" }, + { "name": "forgetGateBias1", "src": "forgetGateBias1" }, + { "name": "cellBias1", "src": "cellBias1" }, + { "name": "outputGateBias1", "src": "outputGateBias1" } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/armnn-schema.js b/frontend/packages/core/public/netron/armnn-schema.js new file mode 100644 index 00000000..09e0878a --- /dev/null +++ b/frontend/packages/core/public/netron/armnn-schema.js @@ -0,0 +1,15584 @@ +// automatically generated by the 
FlatBuffers compiler, do not modify + +/** + * @const + * @namespace + */ +var armnnSerializer = armnnSerializer || {}; + +/** + * @enum {number} + */ +armnnSerializer.ActivationFunction = { + Sigmoid: 0, + TanH: 1, + Linear: 2, + ReLu: 3, + BoundedReLu: 4, + SoftReLu: 5, + LeakyReLu: 6, + Abs: 7, + Sqrt: 8, + Square: 9, + Elu: 10, + HardSwish: 11 +}; + +/** + * @enum {string} + */ +armnnSerializer.ActivationFunctionName = { + '0': 'Sigmoid', + '1': 'TanH', + '2': 'Linear', + '3': 'ReLu', + '4': 'BoundedReLu', + '5': 'SoftReLu', + '6': 'LeakyReLu', + '7': 'Abs', + '8': 'Sqrt', + '9': 'Square', + '10': 'Elu', + '11': 'HardSwish' +}; + +/** + * @enum {number} + */ +armnnSerializer.ArgMinMaxFunction = { + Min: 0, + Max: 1 +}; + +/** + * @enum {string} + */ +armnnSerializer.ArgMinMaxFunctionName = { + '0': 'Min', + '1': 'Max' +}; + +/** + * @enum {number} + */ +armnnSerializer.DataType = { + Float16: 0, + Float32: 1, + QuantisedAsymm8: 2, + Signed32: 3, + Boolean: 4, + QuantisedSymm16: 5, + QAsymmU8: 6, + QSymmS16: 7, + QAsymmS8: 8, + QSymmS8: 9 +}; + +/** + * @enum {string} + */ +armnnSerializer.DataTypeName = { + '0': 'Float16', + '1': 'Float32', + '2': 'QuantisedAsymm8', + '3': 'Signed32', + '4': 'Boolean', + '5': 'QuantisedSymm16', + '6': 'QAsymmU8', + '7': 'QSymmS16', + '8': 'QAsymmS8', + '9': 'QSymmS8' +}; + +/** + * @enum {number} + */ +armnnSerializer.DataLayout = { + NHWC: 0, + NCHW: 1 +}; + +/** + * @enum {string} + */ +armnnSerializer.DataLayoutName = { + '0': 'NHWC', + '1': 'NCHW' +}; + +/** + * @enum {number} + */ +armnnSerializer.ResizeMethod = { + NearestNeighbor: 0, + Bilinear: 1 +}; + +/** + * @enum {string} + */ +armnnSerializer.ResizeMethodName = { + '0': 'NearestNeighbor', + '1': 'Bilinear' +}; + +/** + * @enum {number} + */ +armnnSerializer.ConstTensorData = { + NONE: 0, + ByteData: 1, + ShortData: 2, + IntData: 3, + LongData: 4 +}; + +/** + * @enum {string} + */ +armnnSerializer.ConstTensorDataName = { + '0': 'NONE', + '1': 'ByteData', + '2': 
'ShortData', + '3': 'IntData', + '4': 'LongData' +}; + +/** + * @enum {number} + */ +armnnSerializer.LayerType = { + Addition: 0, + Input: 1, + Multiplication: 2, + Output: 3, + Pooling2d: 4, + Reshape: 5, + Softmax: 6, + Convolution2d: 7, + DepthwiseConvolution2d: 8, + Activation: 9, + Permute: 10, + FullyConnected: 11, + Constant: 12, + SpaceToBatchNd: 13, + BatchToSpaceNd: 14, + Division: 15, + Minimum: 16, + Equal: 17, + Maximum: 18, + Normalization: 19, + Pad: 20, + Rsqrt: 21, + Floor: 22, + BatchNormalization: 23, + Greater: 24, + ResizeBilinear: 25, + Subtraction: 26, + StridedSlice: 27, + Gather: 28, + Mean: 29, + Merger: 30, + L2Normalization: 31, + Splitter: 32, + DetectionPostProcess: 33, + Lstm: 34, + Quantize: 35, + Dequantize: 36, + Merge: 37, + Switch: 38, + Concat: 39, + SpaceToDepth: 40, + Prelu: 41, + TransposeConvolution2d: 42, + Resize: 43, + Stack: 44, + QuantizedLstm: 45, + Abs: 46, + ArgMinMax: 47, + Slice: 48, + DepthToSpace: 49, + InstanceNormalization: 50, + LogSoftmax: 51, + Comparison: 52, + StandIn: 53, + ElementwiseUnary: 54, + Transpose: 55, + QLstm: 56 +}; + +/** + * @enum {string} + */ +armnnSerializer.LayerTypeName = { + '0': 'Addition', + '1': 'Input', + '2': 'Multiplication', + '3': 'Output', + '4': 'Pooling2d', + '5': 'Reshape', + '6': 'Softmax', + '7': 'Convolution2d', + '8': 'DepthwiseConvolution2d', + '9': 'Activation', + '10': 'Permute', + '11': 'FullyConnected', + '12': 'Constant', + '13': 'SpaceToBatchNd', + '14': 'BatchToSpaceNd', + '15': 'Division', + '16': 'Minimum', + '17': 'Equal', + '18': 'Maximum', + '19': 'Normalization', + '20': 'Pad', + '21': 'Rsqrt', + '22': 'Floor', + '23': 'BatchNormalization', + '24': 'Greater', + '25': 'ResizeBilinear', + '26': 'Subtraction', + '27': 'StridedSlice', + '28': 'Gather', + '29': 'Mean', + '30': 'Merger', + '31': 'L2Normalization', + '32': 'Splitter', + '33': 'DetectionPostProcess', + '34': 'Lstm', + '35': 'Quantize', + '36': 'Dequantize', + '37': 'Merge', + '38': 'Switch', + 
'39': 'Concat', + '40': 'SpaceToDepth', + '41': 'Prelu', + '42': 'TransposeConvolution2d', + '43': 'Resize', + '44': 'Stack', + '45': 'QuantizedLstm', + '46': 'Abs', + '47': 'ArgMinMax', + '48': 'Slice', + '49': 'DepthToSpace', + '50': 'InstanceNormalization', + '51': 'LogSoftmax', + '52': 'Comparison', + '53': 'StandIn', + '54': 'ElementwiseUnary', + '55': 'Transpose', + '56': 'QLstm' +}; + +/** + * @enum {number} + */ +armnnSerializer.ComparisonOperation = { + Equal: 0, + Greater: 1, + GreaterOrEqual: 2, + Less: 3, + LessOrEqual: 4, + NotEqual: 5 +}; + +/** + * @enum {string} + */ +armnnSerializer.ComparisonOperationName = { + '0': 'Equal', + '1': 'Greater', + '2': 'GreaterOrEqual', + '3': 'Less', + '4': 'LessOrEqual', + '5': 'NotEqual' +}; + +/** + * @enum {number} + */ +armnnSerializer.UnaryOperation = { + Abs: 0, + Rsqrt: 1, + Sqrt: 2, + Exp: 3, + Neg: 4 +}; + +/** + * @enum {string} + */ +armnnSerializer.UnaryOperationName = { + '0': 'Abs', + '1': 'Rsqrt', + '2': 'Sqrt', + '3': 'Exp', + '4': 'Neg' +}; + +/** + * @enum {number} + */ +armnnSerializer.PoolingAlgorithm = { + Max: 0, + Average: 1, + L2: 2 +}; + +/** + * @enum {string} + */ +armnnSerializer.PoolingAlgorithmName = { + '0': 'Max', + '1': 'Average', + '2': 'L2' +}; + +/** + * @enum {number} + */ +armnnSerializer.OutputShapeRounding = { + Floor: 0, + Ceiling: 1 +}; + +/** + * @enum {string} + */ +armnnSerializer.OutputShapeRoundingName = { + '0': 'Floor', + '1': 'Ceiling' +}; + +/** + * @enum {number} + */ +armnnSerializer.PaddingMethod = { + IgnoreValue: 0, + Exclude: 1 +}; + +/** + * @enum {string} + */ +armnnSerializer.PaddingMethodName = { + '0': 'IgnoreValue', + '1': 'Exclude' +}; + +/** + * @enum {number} + */ +armnnSerializer.NormalizationAlgorithmChannel = { + Across: 0, + Within: 1 +}; + +/** + * @enum {string} + */ +armnnSerializer.NormalizationAlgorithmChannelName = { + '0': 'Across', + '1': 'Within' +}; + +/** + * @enum {number} + */ +armnnSerializer.NormalizationAlgorithmMethod = { + 
LocalBrightness: 0, + LocalContrast: 1 +}; + +/** + * @enum {string} + */ +armnnSerializer.NormalizationAlgorithmMethodName = { + '0': 'LocalBrightness', + '1': 'LocalContrast' +}; + +/** + * @enum {number} + */ +armnnSerializer.Layer = { + NONE: 0, + ActivationLayer: 1, + AdditionLayer: 2, + BatchToSpaceNdLayer: 3, + BatchNormalizationLayer: 4, + ConstantLayer: 5, + Convolution2dLayer: 6, + DepthwiseConvolution2dLayer: 7, + FullyConnectedLayer: 8, + InputLayer: 9, + MultiplicationLayer: 10, + OutputLayer: 11, + PermuteLayer: 12, + Pooling2dLayer: 13, + ReshapeLayer: 14, + SoftmaxLayer: 15, + SpaceToBatchNdLayer: 16, + DivisionLayer: 17, + MinimumLayer: 18, + EqualLayer: 19, + MaximumLayer: 20, + NormalizationLayer: 21, + PadLayer: 22, + RsqrtLayer: 23, + FloorLayer: 24, + GreaterLayer: 25, + ResizeBilinearLayer: 26, + SubtractionLayer: 27, + StridedSliceLayer: 28, + GatherLayer: 29, + MeanLayer: 30, + MergerLayer: 31, + L2NormalizationLayer: 32, + SplitterLayer: 33, + DetectionPostProcessLayer: 34, + LstmLayer: 35, + QuantizedLstmLayer: 36, + QuantizeLayer: 37, + DequantizeLayer: 38, + MergeLayer: 39, + SwitchLayer: 40, + ConcatLayer: 41, + SpaceToDepthLayer: 42, + PreluLayer: 43, + TransposeConvolution2dLayer: 44, + ResizeLayer: 45, + StackLayer: 46, + AbsLayer: 47, + ArgMinMaxLayer: 48, + SliceLayer: 49, + DepthToSpaceLayer: 50, + InstanceNormalizationLayer: 51, + LogSoftmaxLayer: 52, + ComparisonLayer: 53, + StandInLayer: 54, + ElementwiseUnaryLayer: 55, + TransposeLayer: 56, + QLstmLayer: 57 +}; + +/** + * @enum {string} + */ +armnnSerializer.LayerName = { + '0': 'NONE', + '1': 'ActivationLayer', + '2': 'AdditionLayer', + '3': 'BatchToSpaceNdLayer', + '4': 'BatchNormalizationLayer', + '5': 'ConstantLayer', + '6': 'Convolution2dLayer', + '7': 'DepthwiseConvolution2dLayer', + '8': 'FullyConnectedLayer', + '9': 'InputLayer', + '10': 'MultiplicationLayer', + '11': 'OutputLayer', + '12': 'PermuteLayer', + '13': 'Pooling2dLayer', + '14': 'ReshapeLayer', + '15': 
'SoftmaxLayer', + '16': 'SpaceToBatchNdLayer', + '17': 'DivisionLayer', + '18': 'MinimumLayer', + '19': 'EqualLayer', + '20': 'MaximumLayer', + '21': 'NormalizationLayer', + '22': 'PadLayer', + '23': 'RsqrtLayer', + '24': 'FloorLayer', + '25': 'GreaterLayer', + '26': 'ResizeBilinearLayer', + '27': 'SubtractionLayer', + '28': 'StridedSliceLayer', + '29': 'GatherLayer', + '30': 'MeanLayer', + '31': 'MergerLayer', + '32': 'L2NormalizationLayer', + '33': 'SplitterLayer', + '34': 'DetectionPostProcessLayer', + '35': 'LstmLayer', + '36': 'QuantizedLstmLayer', + '37': 'QuantizeLayer', + '38': 'DequantizeLayer', + '39': 'MergeLayer', + '40': 'SwitchLayer', + '41': 'ConcatLayer', + '42': 'SpaceToDepthLayer', + '43': 'PreluLayer', + '44': 'TransposeConvolution2dLayer', + '45': 'ResizeLayer', + '46': 'StackLayer', + '47': 'AbsLayer', + '48': 'ArgMinMaxLayer', + '49': 'SliceLayer', + '50': 'DepthToSpaceLayer', + '51': 'InstanceNormalizationLayer', + '52': 'LogSoftmaxLayer', + '53': 'ComparisonLayer', + '54': 'StandInLayer', + '55': 'ElementwiseUnaryLayer', + '56': 'TransposeLayer', + '57': 'QLstmLayer' +}; + +/** + * @constructor + */ +armnnSerializer.TensorInfo = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.TensorInfo} + */ +armnnSerializer.TensorInfo.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TensorInfo=} obj + * @returns {armnnSerializer.TensorInfo} + */ +armnnSerializer.TensorInfo.getRootAsTensorInfo = function(bb, obj) { + return (obj || new armnnSerializer.TensorInfo).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TensorInfo=} obj + * @returns {armnnSerializer.TensorInfo} + */ 
+armnnSerializer.TensorInfo.getSizePrefixedRootAsTensorInfo = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.TensorInfo).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.TensorInfo.prototype.dimensions = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TensorInfo.prototype.dimensionsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.TensorInfo.prototype.dimensionsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {armnnSerializer.DataType} + */ +armnnSerializer.TensorInfo.prototype.dataType = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {armnnSerializer.DataType} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataType.Float16; +}; + +/** + * @returns {number} + */ +armnnSerializer.TensorInfo.prototype.quantizationScale = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 1.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TensorInfo.prototype.quantizationOffset = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.TensorInfo.prototype.quantizationScales = function(index) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TensorInfo.prototype.quantizationScalesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +armnnSerializer.TensorInfo.prototype.quantizationScalesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.TensorInfo.prototype.quantizationDim = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.TensorInfo.startTensorInfo = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimensionsOffset + */ +armnnSerializer.TensorInfo.addDimensions = function(builder, dimensionsOffset) { + builder.addFieldOffset(0, dimensionsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TensorInfo.createDimensionsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.TensorInfo.startDimensionsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataType} dataType + */ +armnnSerializer.TensorInfo.addDataType = function(builder, dataType) { + builder.addFieldInt8(1, dataType, armnnSerializer.DataType.Float16); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} quantizationScale + */ +armnnSerializer.TensorInfo.addQuantizationScale = function(builder, quantizationScale) { + builder.addFieldFloat32(2, quantizationScale, 1.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} quantizationOffset + */ +armnnSerializer.TensorInfo.addQuantizationOffset = function(builder, quantizationOffset) { + builder.addFieldInt32(3, quantizationOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} quantizationScalesOffset + */ +armnnSerializer.TensorInfo.addQuantizationScales = function(builder, quantizationScalesOffset) { + builder.addFieldOffset(4, quantizationScalesOffset, 0); +}; + +/** + * 
@param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TensorInfo.createQuantizationScalesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.TensorInfo.startQuantizationScalesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} quantizationDim + */ +armnnSerializer.TensorInfo.addQuantizationDim = function(builder, quantizationDim) { + builder.addFieldInt32(5, quantizationDim, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TensorInfo.endTensorInfo = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimensionsOffset + * @param {armnnSerializer.DataType} dataType + * @param {number} quantizationScale + * @param {number} quantizationOffset + * @param {flatbuffers.Offset} quantizationScalesOffset + * @param {number} quantizationDim + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TensorInfo.createTensorInfo = function(builder, dimensionsOffset, dataType, quantizationScale, quantizationOffset, quantizationScalesOffset, quantizationDim) { + armnnSerializer.TensorInfo.startTensorInfo(builder); + armnnSerializer.TensorInfo.addDimensions(builder, dimensionsOffset); + armnnSerializer.TensorInfo.addDataType(builder, dataType); + armnnSerializer.TensorInfo.addQuantizationScale(builder, quantizationScale); + armnnSerializer.TensorInfo.addQuantizationOffset(builder, quantizationOffset); + armnnSerializer.TensorInfo.addQuantizationScales(builder, quantizationScalesOffset); + 
armnnSerializer.TensorInfo.addQuantizationDim(builder, quantizationDim); + return armnnSerializer.TensorInfo.endTensorInfo(builder); +} + +/** + * @constructor + */ +armnnSerializer.Connection = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.Connection} + */ +armnnSerializer.Connection.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @returns {number} + */ +armnnSerializer.Connection.prototype.sourceLayerIndex = function() { + return this.bb.readUint32(this.bb_pos); +}; + +/** + * @returns {number} + */ +armnnSerializer.Connection.prototype.outputSlotIndex = function() { + return this.bb.readUint32(this.bb_pos + 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} sourceLayerIndex + * @param {number} outputSlotIndex + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Connection.createConnection = function(builder, sourceLayerIndex, outputSlotIndex) { + builder.prep(4, 8); + builder.writeInt32(outputSlotIndex); + builder.writeInt32(sourceLayerIndex); + return builder.offset(); +}; + +/** + * @constructor + */ +armnnSerializer.ByteData = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ByteData} + */ +armnnSerializer.ByteData.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ByteData=} obj + * @returns {armnnSerializer.ByteData} + */ +armnnSerializer.ByteData.getRootAsByteData = function(bb, obj) { + return (obj || new armnnSerializer.ByteData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * 
@param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ByteData=} obj + * @returns {armnnSerializer.ByteData} + */ +armnnSerializer.ByteData.getSizePrefixedRootAsByteData = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ByteData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.ByteData.prototype.data = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.ByteData.prototype.dataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +armnnSerializer.ByteData.prototype.dataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ByteData.startByteData = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + */ +armnnSerializer.ByteData.addData = function(builder, dataOffset) { + builder.addFieldOffset(0, dataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ByteData.createDataVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.ByteData.startDataVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ByteData.endByteData = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ByteData.createByteData = function(builder, dataOffset) { + armnnSerializer.ByteData.startByteData(builder); + armnnSerializer.ByteData.addData(builder, dataOffset); + return armnnSerializer.ByteData.endByteData(builder); +} + +/** + * @constructor + */ +armnnSerializer.ShortData = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ShortData} + */ +armnnSerializer.ShortData.prototype.__init = function(i, bb) 
{ + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ShortData=} obj + * @returns {armnnSerializer.ShortData} + */ +armnnSerializer.ShortData.getRootAsShortData = function(bb, obj) { + return (obj || new armnnSerializer.ShortData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ShortData=} obj + * @returns {armnnSerializer.ShortData} + */ +armnnSerializer.ShortData.getSizePrefixedRootAsShortData = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ShortData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.ShortData.prototype.data = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt16(this.bb.__vector(this.bb_pos + offset) + index * 2) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.ShortData.prototype.dataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int16Array} + */ +armnnSerializer.ShortData.prototype.dataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Int16Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ShortData.startShortData = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + */ +armnnSerializer.ShortData.addData = function(builder, dataOffset) { + builder.addFieldOffset(0, dataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ShortData.createDataVector = function(builder, data) { + builder.startVector(2, data.length, 2); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt16(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.ShortData.startDataVector = function(builder, numElems) { + builder.startVector(2, numElems, 2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ShortData.endShortData = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ShortData.createShortData = function(builder, dataOffset) { + armnnSerializer.ShortData.startShortData(builder); + armnnSerializer.ShortData.addData(builder, dataOffset); + return armnnSerializer.ShortData.endShortData(builder); +} + +/** + * @constructor + */ +armnnSerializer.IntData = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.IntData} + */ +armnnSerializer.IntData.prototype.__init = 
function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.IntData=} obj + * @returns {armnnSerializer.IntData} + */ +armnnSerializer.IntData.getRootAsIntData = function(bb, obj) { + return (obj || new armnnSerializer.IntData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.IntData=} obj + * @returns {armnnSerializer.IntData} + */ +armnnSerializer.IntData.getSizePrefixedRootAsIntData = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.IntData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.IntData.prototype.data = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.IntData.prototype.dataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +armnnSerializer.IntData.prototype.dataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.IntData.startIntData = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + */ +armnnSerializer.IntData.addData = function(builder, dataOffset) { + builder.addFieldOffset(0, dataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.IntData.createDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.IntData.startDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.IntData.endIntData = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.IntData.createIntData = function(builder, dataOffset) { + armnnSerializer.IntData.startIntData(builder); + armnnSerializer.IntData.addData(builder, dataOffset); + return armnnSerializer.IntData.endIntData(builder); +} + +/** + * @constructor + */ +armnnSerializer.LongData = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.LongData} + */ +armnnSerializer.LongData.prototype.__init = function(i, bb) { + this.bb_pos 
= i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LongData=} obj + * @returns {armnnSerializer.LongData} + */ +armnnSerializer.LongData.getRootAsLongData = function(bb, obj) { + return (obj || new armnnSerializer.LongData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LongData=} obj + * @returns {armnnSerializer.LongData} + */ +armnnSerializer.LongData.getSizePrefixedRootAsLongData = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.LongData).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {flatbuffers.Long} + */ +armnnSerializer.LongData.prototype.data = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt64(this.bb.__vector(this.bb_pos + offset) + index * 8) : this.bb.createLong(0, 0); +}; + +/** + * @returns {number} + */ +armnnSerializer.LongData.prototype.dataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.LongData.startLongData = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + */ +armnnSerializer.LongData.addData = function(builder, dataOffset) { + builder.addFieldOffset(0, dataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LongData.createDataVector = function(builder, data) { + builder.startVector(8, data.length, 8); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt64(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.LongData.startDataVector = function(builder, numElems) { + builder.startVector(8, numElems, 8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LongData.endLongData = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LongData.createLongData = function(builder, dataOffset) { + armnnSerializer.LongData.startLongData(builder); + armnnSerializer.LongData.addData(builder, dataOffset); + return armnnSerializer.LongData.endLongData(builder); +} + +/** + * @constructor + */ +armnnSerializer.ConstTensor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ConstTensor} + */ +armnnSerializer.ConstTensor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + 
* @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor} + */ +armnnSerializer.ConstTensor.getRootAsConstTensor = function(bb, obj) { + return (obj || new armnnSerializer.ConstTensor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor} + */ +armnnSerializer.ConstTensor.getSizePrefixedRootAsConstTensor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ConstTensor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.TensorInfo=} obj + * @returns {armnnSerializer.TensorInfo|null} + */ +armnnSerializer.ConstTensor.prototype.info = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.TensorInfo).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {armnnSerializer.ConstTensorData} + */ +armnnSerializer.ConstTensor.prototype.dataType = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {armnnSerializer.ConstTensorData} */ (this.bb.readUint8(this.bb_pos + offset)) : armnnSerializer.ConstTensorData.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +armnnSerializer.ConstTensor.prototype.data = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ConstTensor.startConstTensor = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} infoOffset + */ +armnnSerializer.ConstTensor.addInfo = function(builder, infoOffset) { + builder.addFieldOffset(0, infoOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ConstTensorData} dataType + */ +armnnSerializer.ConstTensor.addDataType = function(builder, dataType) { + builder.addFieldInt8(1, dataType, armnnSerializer.ConstTensorData.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + */ +armnnSerializer.ConstTensor.addData = function(builder, dataOffset) { + builder.addFieldOffset(2, dataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ConstTensor.endConstTensor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} infoOffset + * @param {armnnSerializer.ConstTensorData} dataType + * @param {flatbuffers.Offset} dataOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ConstTensor.createConstTensor = function(builder, infoOffset, dataType, dataOffset) { + armnnSerializer.ConstTensor.startConstTensor(builder); + armnnSerializer.ConstTensor.addInfo(builder, infoOffset); + armnnSerializer.ConstTensor.addDataType(builder, dataType); + armnnSerializer.ConstTensor.addData(builder, dataOffset); + return armnnSerializer.ConstTensor.endConstTensor(builder); +} + +/** + * @constructor + */ +armnnSerializer.InputSlot = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + 
* @returns {armnnSerializer.InputSlot} + */ +armnnSerializer.InputSlot.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.InputSlot=} obj + * @returns {armnnSerializer.InputSlot} + */ +armnnSerializer.InputSlot.getRootAsInputSlot = function(bb, obj) { + return (obj || new armnnSerializer.InputSlot).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.InputSlot=} obj + * @returns {armnnSerializer.InputSlot} + */ +armnnSerializer.InputSlot.getSizePrefixedRootAsInputSlot = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.InputSlot).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.InputSlot.prototype.index = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {armnnSerializer.Connection=} obj + * @returns {armnnSerializer.Connection|null} + */ +armnnSerializer.InputSlot.prototype.connection = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.Connection).__init(this.bb_pos + offset, this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.InputSlot.startInputSlot = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} index + */ +armnnSerializer.InputSlot.addIndex = function(builder, index) { + builder.addFieldInt32(0, index, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} connectionOffset + */ +armnnSerializer.InputSlot.addConnection = function(builder, connectionOffset) { + builder.addFieldStruct(1, connectionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.InputSlot.endInputSlot = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} index + * @param {flatbuffers.Offset} connectionOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.InputSlot.createInputSlot = function(builder, index, connectionOffset) { + armnnSerializer.InputSlot.startInputSlot(builder); + armnnSerializer.InputSlot.addIndex(builder, index); + armnnSerializer.InputSlot.addConnection(builder, connectionOffset); + return armnnSerializer.InputSlot.endInputSlot(builder); +} + +/** + * @constructor + */ +armnnSerializer.OutputSlot = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.OutputSlot} + */ +armnnSerializer.OutputSlot.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.OutputSlot=} obj + * @returns {armnnSerializer.OutputSlot} + */ +armnnSerializer.OutputSlot.getRootAsOutputSlot = function(bb, obj) { + return 
(obj || new armnnSerializer.OutputSlot).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.OutputSlot=} obj + * @returns {armnnSerializer.OutputSlot} + */ +armnnSerializer.OutputSlot.getSizePrefixedRootAsOutputSlot = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.OutputSlot).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.OutputSlot.prototype.index = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {armnnSerializer.TensorInfo=} obj + * @returns {armnnSerializer.TensorInfo|null} + */ +armnnSerializer.OutputSlot.prototype.tensorInfo = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.TensorInfo).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.OutputSlot.startOutputSlot = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} index + */ +armnnSerializer.OutputSlot.addIndex = function(builder, index) { + builder.addFieldInt32(0, index, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} tensorInfoOffset + */ +armnnSerializer.OutputSlot.addTensorInfo = function(builder, tensorInfoOffset) { + builder.addFieldOffset(1, tensorInfoOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.OutputSlot.endOutputSlot = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} index + * @param {flatbuffers.Offset} tensorInfoOffset + * @returns {flatbuffers.Offset} + */ 
+armnnSerializer.OutputSlot.createOutputSlot = function(builder, index, tensorInfoOffset) { + armnnSerializer.OutputSlot.startOutputSlot(builder); + armnnSerializer.OutputSlot.addIndex(builder, index); + armnnSerializer.OutputSlot.addTensorInfo(builder, tensorInfoOffset); + return armnnSerializer.OutputSlot.endOutputSlot(builder); +} + +/** + * @constructor + */ +armnnSerializer.LayerBase = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.LayerBase} + */ +armnnSerializer.LayerBase.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase} + */ +armnnSerializer.LayerBase.getRootAsLayerBase = function(bb, obj) { + return (obj || new armnnSerializer.LayerBase).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase} + */ +armnnSerializer.LayerBase.getSizePrefixedRootAsLayerBase = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.LayerBase).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.LayerBase.prototype.index = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +armnnSerializer.LayerBase.prototype.layerName = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @returns {armnnSerializer.LayerType} + */ +armnnSerializer.LayerBase.prototype.layerType = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {armnnSerializer.LayerType} */ (this.bb.readUint32(this.bb_pos + offset)) : armnnSerializer.LayerType.Addition; +}; + +/** + * @param {number} index + * @param {armnnSerializer.InputSlot=} obj + * @returns {armnnSerializer.InputSlot} + */ +armnnSerializer.LayerBase.prototype.inputSlots = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new armnnSerializer.InputSlot).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.LayerBase.prototype.inputSlotsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {armnnSerializer.OutputSlot=} obj + * @returns {armnnSerializer.OutputSlot} + */ +armnnSerializer.LayerBase.prototype.outputSlots = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new armnnSerializer.OutputSlot).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.LayerBase.prototype.outputSlotsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.LayerBase.startLayerBase = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} index + */ +armnnSerializer.LayerBase.addIndex = function(builder, index) { + builder.addFieldInt32(0, index, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} layerNameOffset + */ +armnnSerializer.LayerBase.addLayerName = function(builder, layerNameOffset) { + builder.addFieldOffset(1, layerNameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.LayerType} layerType + */ +armnnSerializer.LayerBase.addLayerType = function(builder, layerType) { + builder.addFieldInt32(2, layerType, armnnSerializer.LayerType.Addition); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputSlotsOffset + */ +armnnSerializer.LayerBase.addInputSlots = function(builder, inputSlotsOffset) { + builder.addFieldOffset(3, inputSlotsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LayerBase.createInputSlotsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.LayerBase.startInputSlotsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputSlotsOffset + */ +armnnSerializer.LayerBase.addOutputSlots = function(builder, outputSlotsOffset) { + builder.addFieldOffset(4, outputSlotsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns 
{flatbuffers.Offset} + */ +armnnSerializer.LayerBase.createOutputSlotsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.LayerBase.startOutputSlotsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LayerBase.endLayerBase = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} index + * @param {flatbuffers.Offset} layerNameOffset + * @param {armnnSerializer.LayerType} layerType + * @param {flatbuffers.Offset} inputSlotsOffset + * @param {flatbuffers.Offset} outputSlotsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LayerBase.createLayerBase = function(builder, index, layerNameOffset, layerType, inputSlotsOffset, outputSlotsOffset) { + armnnSerializer.LayerBase.startLayerBase(builder); + armnnSerializer.LayerBase.addIndex(builder, index); + armnnSerializer.LayerBase.addLayerName(builder, layerNameOffset); + armnnSerializer.LayerBase.addLayerType(builder, layerType); + armnnSerializer.LayerBase.addInputSlots(builder, inputSlotsOffset); + armnnSerializer.LayerBase.addOutputSlots(builder, outputSlotsOffset); + return armnnSerializer.LayerBase.endLayerBase(builder); +} + +/** + * @constructor + */ +armnnSerializer.BindableLayerBase = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.BindableLayerBase} + */ +armnnSerializer.BindableLayerBase.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return 
this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BindableLayerBase=} obj + * @returns {armnnSerializer.BindableLayerBase} + */ +armnnSerializer.BindableLayerBase.getRootAsBindableLayerBase = function(bb, obj) { + return (obj || new armnnSerializer.BindableLayerBase).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BindableLayerBase=} obj + * @returns {armnnSerializer.BindableLayerBase} + */ +armnnSerializer.BindableLayerBase.getSizePrefixedRootAsBindableLayerBase = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.BindableLayerBase).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.BindableLayerBase.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.BindableLayerBase.prototype.layerBindingId = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.BindableLayerBase.startBindableLayerBase = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.BindableLayerBase.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} layerBindingId + */ +armnnSerializer.BindableLayerBase.addLayerBindingId = function(builder, layerBindingId) { + builder.addFieldInt32(1, layerBindingId, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BindableLayerBase.endBindableLayerBase = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {number} layerBindingId + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BindableLayerBase.createBindableLayerBase = function(builder, baseOffset, layerBindingId) { + armnnSerializer.BindableLayerBase.startBindableLayerBase(builder); + armnnSerializer.BindableLayerBase.addBase(builder, baseOffset); + armnnSerializer.BindableLayerBase.addLayerBindingId(builder, layerBindingId); + return armnnSerializer.BindableLayerBase.endBindableLayerBase(builder); +} + +/** + * @deprecated Use ElementwiseUnaryLayer instead + * + * @constructor + */ +armnnSerializer.AbsLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.AbsLayer} + */ +armnnSerializer.AbsLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{armnnSerializer.AbsLayer=} obj + * @returns {armnnSerializer.AbsLayer} + */ +armnnSerializer.AbsLayer.getRootAsAbsLayer = function(bb, obj) { + return (obj || new armnnSerializer.AbsLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.AbsLayer=} obj + * @returns {armnnSerializer.AbsLayer} + */ +armnnSerializer.AbsLayer.getSizePrefixedRootAsAbsLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.AbsLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.AbsLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.AbsLayer.startAbsLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.AbsLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.AbsLayer.endAbsLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.AbsLayer.createAbsLayer = function(builder, baseOffset) { + armnnSerializer.AbsLayer.startAbsLayer(builder); + armnnSerializer.AbsLayer.addBase(builder, baseOffset); + return armnnSerializer.AbsLayer.endAbsLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ActivationLayer = function() { + /** + * @type 
{flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ActivationLayer} + */ +armnnSerializer.ActivationLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ActivationLayer=} obj + * @returns {armnnSerializer.ActivationLayer} + */ +armnnSerializer.ActivationLayer.getRootAsActivationLayer = function(bb, obj) { + return (obj || new armnnSerializer.ActivationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ActivationLayer=} obj + * @returns {armnnSerializer.ActivationLayer} + */ +armnnSerializer.ActivationLayer.getSizePrefixedRootAsActivationLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ActivationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ActivationLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ActivationDescriptor=} obj + * @returns {armnnSerializer.ActivationDescriptor|null} + */ +armnnSerializer.ActivationLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ActivationDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ActivationLayer.startActivationLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ActivationLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ActivationLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ActivationLayer.endActivationLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ActivationLayer.createActivationLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ActivationLayer.startActivationLayer(builder); + armnnSerializer.ActivationLayer.addBase(builder, baseOffset); + armnnSerializer.ActivationLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ActivationLayer.endActivationLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ActivationDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ActivationDescriptor} + */ +armnnSerializer.ActivationDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ActivationDescriptor=} obj + * @returns {armnnSerializer.ActivationDescriptor} + */ +armnnSerializer.ActivationDescriptor.getRootAsActivationDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ActivationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ActivationDescriptor=} obj + * @returns {armnnSerializer.ActivationDescriptor} + */ +armnnSerializer.ActivationDescriptor.getSizePrefixedRootAsActivationDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ActivationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.ActivationFunction} + */ +armnnSerializer.ActivationDescriptor.prototype.activationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {armnnSerializer.ActivationFunction} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.ActivationFunction.Sigmoid; +}; + +/** + * @returns {number} + */ +armnnSerializer.ActivationDescriptor.prototype.a = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.ActivationDescriptor.prototype.b = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ActivationDescriptor.startActivationDescriptor = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ActivationFunction} activationFunction + */ +armnnSerializer.ActivationDescriptor.addActivationFunction = function(builder, activationFunction) { + builder.addFieldInt8(0, activationFunction, armnnSerializer.ActivationFunction.Sigmoid); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} a + */ +armnnSerializer.ActivationDescriptor.addA = function(builder, a) { + builder.addFieldFloat32(1, a, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} b + */ +armnnSerializer.ActivationDescriptor.addB = function(builder, b) { + builder.addFieldFloat32(2, b, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ActivationDescriptor.endActivationDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ActivationFunction} activationFunction + * @param {number} a + * @param {number} b + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ActivationDescriptor.createActivationDescriptor = function(builder, activationFunction, a, b) { + armnnSerializer.ActivationDescriptor.startActivationDescriptor(builder); + armnnSerializer.ActivationDescriptor.addActivationFunction(builder, activationFunction); + armnnSerializer.ActivationDescriptor.addA(builder, a); + armnnSerializer.ActivationDescriptor.addB(builder, b); + return armnnSerializer.ActivationDescriptor.endActivationDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.AdditionLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 
0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.AdditionLayer} + */ +armnnSerializer.AdditionLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.AdditionLayer=} obj + * @returns {armnnSerializer.AdditionLayer} + */ +armnnSerializer.AdditionLayer.getRootAsAdditionLayer = function(bb, obj) { + return (obj || new armnnSerializer.AdditionLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.AdditionLayer=} obj + * @returns {armnnSerializer.AdditionLayer} + */ +armnnSerializer.AdditionLayer.getSizePrefixedRootAsAdditionLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.AdditionLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.AdditionLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.AdditionLayer.startAdditionLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.AdditionLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.AdditionLayer.endAdditionLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.AdditionLayer.createAdditionLayer = function(builder, baseOffset) { + armnnSerializer.AdditionLayer.startAdditionLayer(builder); + armnnSerializer.AdditionLayer.addBase(builder, baseOffset); + return armnnSerializer.AdditionLayer.endAdditionLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ArgMinMaxLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ArgMinMaxLayer} + */ +armnnSerializer.ArgMinMaxLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ArgMinMaxLayer=} obj + * @returns {armnnSerializer.ArgMinMaxLayer} + */ +armnnSerializer.ArgMinMaxLayer.getRootAsArgMinMaxLayer = function(bb, obj) { + return (obj || new armnnSerializer.ArgMinMaxLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ArgMinMaxLayer=} obj + * @returns 
{armnnSerializer.ArgMinMaxLayer} + */ +armnnSerializer.ArgMinMaxLayer.getSizePrefixedRootAsArgMinMaxLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ArgMinMaxLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ArgMinMaxLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ArgMinMaxDescriptor=} obj + * @returns {armnnSerializer.ArgMinMaxDescriptor|null} + */ +armnnSerializer.ArgMinMaxLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.ArgMinMaxDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ArgMinMaxLayer.startArgMinMaxLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ArgMinMaxLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ArgMinMaxLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ArgMinMaxLayer.endArgMinMaxLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} 
descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ArgMinMaxLayer.createArgMinMaxLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ArgMinMaxLayer.startArgMinMaxLayer(builder); + armnnSerializer.ArgMinMaxLayer.addBase(builder, baseOffset); + armnnSerializer.ArgMinMaxLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ArgMinMaxLayer.endArgMinMaxLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ArgMinMaxDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ArgMinMaxDescriptor} + */ +armnnSerializer.ArgMinMaxDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ArgMinMaxDescriptor=} obj + * @returns {armnnSerializer.ArgMinMaxDescriptor} + */ +armnnSerializer.ArgMinMaxDescriptor.getRootAsArgMinMaxDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ArgMinMaxDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ArgMinMaxDescriptor=} obj + * @returns {armnnSerializer.ArgMinMaxDescriptor} + */ +armnnSerializer.ArgMinMaxDescriptor.getSizePrefixedRootAsArgMinMaxDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ArgMinMaxDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.ArgMinMaxFunction} + */ +armnnSerializer.ArgMinMaxDescriptor.prototype.argMinMaxFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {armnnSerializer.ArgMinMaxFunction} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.ArgMinMaxFunction.Min; +}; + +/** + * @returns {number} + */ +armnnSerializer.ArgMinMaxDescriptor.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ArgMinMaxDescriptor.startArgMinMaxDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ArgMinMaxFunction} argMinMaxFunction + */ +armnnSerializer.ArgMinMaxDescriptor.addArgMinMaxFunction = function(builder, argMinMaxFunction) { + builder.addFieldInt8(0, argMinMaxFunction, armnnSerializer.ArgMinMaxFunction.Min); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +armnnSerializer.ArgMinMaxDescriptor.addAxis = function(builder, axis) { + builder.addFieldInt32(1, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ArgMinMaxDescriptor.endArgMinMaxDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ArgMinMaxFunction} argMinMaxFunction + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ArgMinMaxDescriptor.createArgMinMaxDescriptor = function(builder, argMinMaxFunction, axis) { + armnnSerializer.ArgMinMaxDescriptor.startArgMinMaxDescriptor(builder); + armnnSerializer.ArgMinMaxDescriptor.addArgMinMaxFunction(builder, argMinMaxFunction); + armnnSerializer.ArgMinMaxDescriptor.addAxis(builder, axis); + return armnnSerializer.ArgMinMaxDescriptor.endArgMinMaxDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.ComparisonDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type 
{number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ComparisonDescriptor} + */ +armnnSerializer.ComparisonDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ComparisonDescriptor=} obj + * @returns {armnnSerializer.ComparisonDescriptor} + */ +armnnSerializer.ComparisonDescriptor.getRootAsComparisonDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ComparisonDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ComparisonDescriptor=} obj + * @returns {armnnSerializer.ComparisonDescriptor} + */ +armnnSerializer.ComparisonDescriptor.getSizePrefixedRootAsComparisonDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ComparisonDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.ComparisonOperation} + */ +armnnSerializer.ComparisonDescriptor.prototype.operation = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {armnnSerializer.ComparisonOperation} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.ComparisonOperation.Equal; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ComparisonDescriptor.startComparisonDescriptor = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ComparisonOperation} operation + */ +armnnSerializer.ComparisonDescriptor.addOperation = function(builder, operation) { + builder.addFieldInt8(0, operation, armnnSerializer.ComparisonOperation.Equal); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ComparisonDescriptor.endComparisonDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ComparisonOperation} operation + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ComparisonDescriptor.createComparisonDescriptor = function(builder, operation) { + armnnSerializer.ComparisonDescriptor.startComparisonDescriptor(builder); + armnnSerializer.ComparisonDescriptor.addOperation(builder, operation); + return armnnSerializer.ComparisonDescriptor.endComparisonDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.ComparisonLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ComparisonLayer} + */ +armnnSerializer.ComparisonLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ComparisonLayer=} obj + * @returns {armnnSerializer.ComparisonLayer} + */ +armnnSerializer.ComparisonLayer.getRootAsComparisonLayer = function(bb, obj) { + return (obj || new 
armnnSerializer.ComparisonLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ComparisonLayer=} obj + * @returns {armnnSerializer.ComparisonLayer} + */ +armnnSerializer.ComparisonLayer.getSizePrefixedRootAsComparisonLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ComparisonLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ComparisonLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ComparisonDescriptor=} obj + * @returns {armnnSerializer.ComparisonDescriptor|null} + */ +armnnSerializer.ComparisonLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ComparisonDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ComparisonLayer.startComparisonLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ComparisonLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ComparisonLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ComparisonLayer.endComparisonLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ComparisonLayer.createComparisonLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ComparisonLayer.startComparisonLayer(builder); + armnnSerializer.ComparisonLayer.addBase(builder, baseOffset); + armnnSerializer.ComparisonLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ComparisonLayer.endComparisonLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ConstantLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ConstantLayer} + */ +armnnSerializer.ConstantLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} 
bb + * @param {armnnSerializer.ConstantLayer=} obj + * @returns {armnnSerializer.ConstantLayer} + */ +armnnSerializer.ConstantLayer.getRootAsConstantLayer = function(bb, obj) { + return (obj || new armnnSerializer.ConstantLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ConstantLayer=} obj + * @returns {armnnSerializer.ConstantLayer} + */ +armnnSerializer.ConstantLayer.getSizePrefixedRootAsConstantLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ConstantLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ConstantLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.ConstantLayer.prototype.input = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ConstantLayer.startConstantLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ConstantLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputOffset + */ +armnnSerializer.ConstantLayer.addInput = function(builder, inputOffset) { + builder.addFieldOffset(1, inputOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ConstantLayer.endConstantLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} inputOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ConstantLayer.createConstantLayer = function(builder, baseOffset, inputOffset) { + armnnSerializer.ConstantLayer.startConstantLayer(builder); + armnnSerializer.ConstantLayer.addBase(builder, baseOffset); + armnnSerializer.ConstantLayer.addInput(builder, inputOffset); + return armnnSerializer.ConstantLayer.endConstantLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.Convolution2dLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.Convolution2dLayer} + */ +armnnSerializer.Convolution2dLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Convolution2dLayer=} obj + * 
@returns {armnnSerializer.Convolution2dLayer} + */ +armnnSerializer.Convolution2dLayer.getRootAsConvolution2dLayer = function(bb, obj) { + return (obj || new armnnSerializer.Convolution2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Convolution2dLayer=} obj + * @returns {armnnSerializer.Convolution2dLayer} + */ +armnnSerializer.Convolution2dLayer.getSizePrefixedRootAsConvolution2dLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.Convolution2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.Convolution2dLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.Convolution2dDescriptor=} obj + * @returns {armnnSerializer.Convolution2dDescriptor|null} + */ +armnnSerializer.Convolution2dLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.Convolution2dDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.Convolution2dLayer.prototype.weights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.Convolution2dLayer.prototype.biases = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.Convolution2dLayer.startConvolution2dLayer = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.Convolution2dLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.Convolution2dLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightsOffset + */ +armnnSerializer.Convolution2dLayer.addWeights = function(builder, weightsOffset) { + builder.addFieldOffset(2, weightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasesOffset + */ +armnnSerializer.Convolution2dLayer.addBiases = function(builder, biasesOffset) { + builder.addFieldOffset(3, biasesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Convolution2dLayer.endConvolution2dLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} weightsOffset + * @param 
{flatbuffers.Offset} biasesOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Convolution2dLayer.createConvolution2dLayer = function(builder, baseOffset, descriptorOffset, weightsOffset, biasesOffset) { + armnnSerializer.Convolution2dLayer.startConvolution2dLayer(builder); + armnnSerializer.Convolution2dLayer.addBase(builder, baseOffset); + armnnSerializer.Convolution2dLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.Convolution2dLayer.addWeights(builder, weightsOffset); + armnnSerializer.Convolution2dLayer.addBiases(builder, biasesOffset); + return armnnSerializer.Convolution2dLayer.endConvolution2dLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.Convolution2dDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.Convolution2dDescriptor} + */ +armnnSerializer.Convolution2dDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Convolution2dDescriptor=} obj + * @returns {armnnSerializer.Convolution2dDescriptor} + */ +armnnSerializer.Convolution2dDescriptor.getRootAsConvolution2dDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.Convolution2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Convolution2dDescriptor=} obj + * @returns {armnnSerializer.Convolution2dDescriptor} + */ +armnnSerializer.Convolution2dDescriptor.getSizePrefixedRootAsConvolution2dDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.Convolution2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ 
+armnnSerializer.Convolution2dDescriptor.prototype.padLeft = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Convolution2dDescriptor.prototype.padRight = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Convolution2dDescriptor.prototype.padTop = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Convolution2dDescriptor.prototype.padBottom = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Convolution2dDescriptor.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Convolution2dDescriptor.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Convolution2dDescriptor.prototype.dilationX = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +armnnSerializer.Convolution2dDescriptor.prototype.dilationY = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.Convolution2dDescriptor.prototype.biasEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.Convolution2dDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? /** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NCHW; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.Convolution2dDescriptor.startConvolution2dDescriptor = function(builder) { + builder.startObject(10); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padLeft + */ +armnnSerializer.Convolution2dDescriptor.addPadLeft = function(builder, padLeft) { + builder.addFieldInt32(0, padLeft, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padRight + */ +armnnSerializer.Convolution2dDescriptor.addPadRight = function(builder, padRight) { + builder.addFieldInt32(1, padRight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padTop + */ +armnnSerializer.Convolution2dDescriptor.addPadTop = function(builder, padTop) { + builder.addFieldInt32(2, padTop, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padBottom + */ +armnnSerializer.Convolution2dDescriptor.addPadBottom = function(builder, padBottom) { + builder.addFieldInt32(3, padBottom, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +armnnSerializer.Convolution2dDescriptor.addStrideX = function(builder, strideX) { + builder.addFieldInt32(4, strideX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +armnnSerializer.Convolution2dDescriptor.addStrideY = function(builder, strideY) { + builder.addFieldInt32(5, strideY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationX + */ +armnnSerializer.Convolution2dDescriptor.addDilationX = function(builder, dilationX) { + 
builder.addFieldInt32(6, dilationX, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationY + */ +armnnSerializer.Convolution2dDescriptor.addDilationY = function(builder, dilationY) { + builder.addFieldInt32(7, dilationY, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} biasEnabled + */ +armnnSerializer.Convolution2dDescriptor.addBiasEnabled = function(builder, biasEnabled) { + builder.addFieldInt8(8, +biasEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.Convolution2dDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(9, dataLayout, armnnSerializer.DataLayout.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Convolution2dDescriptor.endConvolution2dDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padLeft + * @param {number} padRight + * @param {number} padTop + * @param {number} padBottom + * @param {number} strideX + * @param {number} strideY + * @param {number} dilationX + * @param {number} dilationY + * @param {boolean} biasEnabled + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Convolution2dDescriptor.createConvolution2dDescriptor = function(builder, padLeft, padRight, padTop, padBottom, strideX, strideY, dilationX, dilationY, biasEnabled, dataLayout) { + armnnSerializer.Convolution2dDescriptor.startConvolution2dDescriptor(builder); + armnnSerializer.Convolution2dDescriptor.addPadLeft(builder, padLeft); + armnnSerializer.Convolution2dDescriptor.addPadRight(builder, padRight); + armnnSerializer.Convolution2dDescriptor.addPadTop(builder, padTop); + armnnSerializer.Convolution2dDescriptor.addPadBottom(builder, padBottom); + 
armnnSerializer.Convolution2dDescriptor.addStrideX(builder, strideX); + armnnSerializer.Convolution2dDescriptor.addStrideY(builder, strideY); + armnnSerializer.Convolution2dDescriptor.addDilationX(builder, dilationX); + armnnSerializer.Convolution2dDescriptor.addDilationY(builder, dilationY); + armnnSerializer.Convolution2dDescriptor.addBiasEnabled(builder, biasEnabled); + armnnSerializer.Convolution2dDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.Convolution2dDescriptor.endConvolution2dDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.DepthToSpaceLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DepthToSpaceLayer} + */ +armnnSerializer.DepthToSpaceLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthToSpaceLayer=} obj + * @returns {armnnSerializer.DepthToSpaceLayer} + */ +armnnSerializer.DepthToSpaceLayer.getRootAsDepthToSpaceLayer = function(bb, obj) { + return (obj || new armnnSerializer.DepthToSpaceLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthToSpaceLayer=} obj + * @returns {armnnSerializer.DepthToSpaceLayer} + */ +armnnSerializer.DepthToSpaceLayer.getSizePrefixedRootAsDepthToSpaceLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DepthToSpaceLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.DepthToSpaceLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return 
offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.DepthToSpaceDescriptor=} obj + * @returns {armnnSerializer.DepthToSpaceDescriptor|null} + */ +armnnSerializer.DepthToSpaceLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.DepthToSpaceDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DepthToSpaceLayer.startDepthToSpaceLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.DepthToSpaceLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.DepthToSpaceLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthToSpaceLayer.endDepthToSpaceLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthToSpaceLayer.createDepthToSpaceLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.DepthToSpaceLayer.startDepthToSpaceLayer(builder); + armnnSerializer.DepthToSpaceLayer.addBase(builder, baseOffset); + armnnSerializer.DepthToSpaceLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.DepthToSpaceLayer.endDepthToSpaceLayer(builder); +} + +/** + * @constructor + */ 
+armnnSerializer.DepthToSpaceDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DepthToSpaceDescriptor} + */ +armnnSerializer.DepthToSpaceDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthToSpaceDescriptor=} obj + * @returns {armnnSerializer.DepthToSpaceDescriptor} + */ +armnnSerializer.DepthToSpaceDescriptor.getRootAsDepthToSpaceDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.DepthToSpaceDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthToSpaceDescriptor=} obj + * @returns {armnnSerializer.DepthToSpaceDescriptor} + */ +armnnSerializer.DepthToSpaceDescriptor.getSizePrefixedRootAsDepthToSpaceDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DepthToSpaceDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthToSpaceDescriptor.prototype.blockSize = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.DepthToSpaceDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DepthToSpaceDescriptor.startDepthToSpaceDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + */ +armnnSerializer.DepthToSpaceDescriptor.addBlockSize = function(builder, blockSize) { + builder.addFieldInt32(0, blockSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.DepthToSpaceDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(1, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthToSpaceDescriptor.endDepthToSpaceDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthToSpaceDescriptor.createDepthToSpaceDescriptor = function(builder, blockSize, dataLayout) { + armnnSerializer.DepthToSpaceDescriptor.startDepthToSpaceDescriptor(builder); + armnnSerializer.DepthToSpaceDescriptor.addBlockSize(builder, blockSize); + armnnSerializer.DepthToSpaceDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.DepthToSpaceDescriptor.endDepthToSpaceDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.DivisionLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DivisionLayer} + */ +armnnSerializer.DivisionLayer.prototype.__init = function(i, bb) { + 
this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DivisionLayer=} obj + * @returns {armnnSerializer.DivisionLayer} + */ +armnnSerializer.DivisionLayer.getRootAsDivisionLayer = function(bb, obj) { + return (obj || new armnnSerializer.DivisionLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DivisionLayer=} obj + * @returns {armnnSerializer.DivisionLayer} + */ +armnnSerializer.DivisionLayer.getSizePrefixedRootAsDivisionLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DivisionLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.DivisionLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DivisionLayer.startDivisionLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.DivisionLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DivisionLayer.endDivisionLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DivisionLayer.createDivisionLayer = function(builder, baseOffset) { + armnnSerializer.DivisionLayer.startDivisionLayer(builder); + armnnSerializer.DivisionLayer.addBase(builder, baseOffset); + return armnnSerializer.DivisionLayer.endDivisionLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ElementwiseUnaryDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ElementwiseUnaryDescriptor} + */ +armnnSerializer.ElementwiseUnaryDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ElementwiseUnaryDescriptor=} obj + * @returns {armnnSerializer.ElementwiseUnaryDescriptor} + */ +armnnSerializer.ElementwiseUnaryDescriptor.getRootAsElementwiseUnaryDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ElementwiseUnaryDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ElementwiseUnaryDescriptor=} obj + * @returns {armnnSerializer.ElementwiseUnaryDescriptor} + */ +armnnSerializer.ElementwiseUnaryDescriptor.getSizePrefixedRootAsElementwiseUnaryDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ElementwiseUnaryDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.UnaryOperation} + */ +armnnSerializer.ElementwiseUnaryDescriptor.prototype.operation = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {armnnSerializer.UnaryOperation} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.UnaryOperation.Abs; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ElementwiseUnaryDescriptor.startElementwiseUnaryDescriptor = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.UnaryOperation} operation + */ +armnnSerializer.ElementwiseUnaryDescriptor.addOperation = function(builder, operation) { + builder.addFieldInt8(0, operation, armnnSerializer.UnaryOperation.Abs); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ElementwiseUnaryDescriptor.endElementwiseUnaryDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.UnaryOperation} operation + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ElementwiseUnaryDescriptor.createElementwiseUnaryDescriptor = function(builder, operation) { + armnnSerializer.ElementwiseUnaryDescriptor.startElementwiseUnaryDescriptor(builder); + armnnSerializer.ElementwiseUnaryDescriptor.addOperation(builder, operation); + return armnnSerializer.ElementwiseUnaryDescriptor.endElementwiseUnaryDescriptor(builder); +} 
+ +/** + * @constructor + */ +armnnSerializer.ElementwiseUnaryLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ElementwiseUnaryLayer} + */ +armnnSerializer.ElementwiseUnaryLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ElementwiseUnaryLayer=} obj + * @returns {armnnSerializer.ElementwiseUnaryLayer} + */ +armnnSerializer.ElementwiseUnaryLayer.getRootAsElementwiseUnaryLayer = function(bb, obj) { + return (obj || new armnnSerializer.ElementwiseUnaryLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ElementwiseUnaryLayer=} obj + * @returns {armnnSerializer.ElementwiseUnaryLayer} + */ +armnnSerializer.ElementwiseUnaryLayer.getSizePrefixedRootAsElementwiseUnaryLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ElementwiseUnaryLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ElementwiseUnaryLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ElementwiseUnaryDescriptor=} obj + * @returns {armnnSerializer.ElementwiseUnaryDescriptor|null} + */ +armnnSerializer.ElementwiseUnaryLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ElementwiseUnaryDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ElementwiseUnaryLayer.startElementwiseUnaryLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ElementwiseUnaryLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ElementwiseUnaryLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ElementwiseUnaryLayer.endElementwiseUnaryLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ElementwiseUnaryLayer.createElementwiseUnaryLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ElementwiseUnaryLayer.startElementwiseUnaryLayer(builder); + armnnSerializer.ElementwiseUnaryLayer.addBase(builder, baseOffset); + armnnSerializer.ElementwiseUnaryLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ElementwiseUnaryLayer.endElementwiseUnaryLayer(builder); +} + +/** + * @deprecated Use ComparisonLayer instead + * + * @constructor + */ +armnnSerializer.EqualLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.EqualLayer} + */ 
+armnnSerializer.EqualLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.EqualLayer=} obj + * @returns {armnnSerializer.EqualLayer} + */ +armnnSerializer.EqualLayer.getRootAsEqualLayer = function(bb, obj) { + return (obj || new armnnSerializer.EqualLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.EqualLayer=} obj + * @returns {armnnSerializer.EqualLayer} + */ +armnnSerializer.EqualLayer.getSizePrefixedRootAsEqualLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.EqualLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.EqualLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.EqualLayer.startEqualLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.EqualLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.EqualLayer.endEqualLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.EqualLayer.createEqualLayer = function(builder, baseOffset) { + armnnSerializer.EqualLayer.startEqualLayer(builder); + armnnSerializer.EqualLayer.addBase(builder, baseOffset); + return armnnSerializer.EqualLayer.endEqualLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.FloorLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.FloorLayer} + */ +armnnSerializer.FloorLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FloorLayer=} obj + * @returns {armnnSerializer.FloorLayer} + */ +armnnSerializer.FloorLayer.getRootAsFloorLayer = function(bb, obj) { + return (obj || new armnnSerializer.FloorLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FloorLayer=} obj + * @returns {armnnSerializer.FloorLayer} + */ 
+armnnSerializer.FloorLayer.getSizePrefixedRootAsFloorLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.FloorLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.FloorLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.FloorLayer.startFloorLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.FloorLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FloorLayer.endFloorLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FloorLayer.createFloorLayer = function(builder, baseOffset) { + armnnSerializer.FloorLayer.startFloorLayer(builder); + armnnSerializer.FloorLayer.addBase(builder, baseOffset); + return armnnSerializer.FloorLayer.endFloorLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.FullyConnectedLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.FullyConnectedLayer} + */ +armnnSerializer.FullyConnectedLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return 
this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FullyConnectedLayer=} obj + * @returns {armnnSerializer.FullyConnectedLayer} + */ +armnnSerializer.FullyConnectedLayer.getRootAsFullyConnectedLayer = function(bb, obj) { + return (obj || new armnnSerializer.FullyConnectedLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FullyConnectedLayer=} obj + * @returns {armnnSerializer.FullyConnectedLayer} + */ +armnnSerializer.FullyConnectedLayer.getSizePrefixedRootAsFullyConnectedLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.FullyConnectedLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.FullyConnectedLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.FullyConnectedDescriptor=} obj + * @returns {armnnSerializer.FullyConnectedDescriptor|null} + */ +armnnSerializer.FullyConnectedLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.FullyConnectedDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.FullyConnectedLayer.prototype.weights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.FullyConnectedLayer.prototype.biases = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.FullyConnectedLayer.startFullyConnectedLayer = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.FullyConnectedLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.FullyConnectedLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightsOffset + */ +armnnSerializer.FullyConnectedLayer.addWeights = function(builder, weightsOffset) { + builder.addFieldOffset(2, weightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasesOffset + */ +armnnSerializer.FullyConnectedLayer.addBiases = function(builder, biasesOffset) { + builder.addFieldOffset(3, biasesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FullyConnectedLayer.endFullyConnectedLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} weightsOffset + * @param 
{flatbuffers.Offset} biasesOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FullyConnectedLayer.createFullyConnectedLayer = function(builder, baseOffset, descriptorOffset, weightsOffset, biasesOffset) { + armnnSerializer.FullyConnectedLayer.startFullyConnectedLayer(builder); + armnnSerializer.FullyConnectedLayer.addBase(builder, baseOffset); + armnnSerializer.FullyConnectedLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.FullyConnectedLayer.addWeights(builder, weightsOffset); + armnnSerializer.FullyConnectedLayer.addBiases(builder, biasesOffset); + return armnnSerializer.FullyConnectedLayer.endFullyConnectedLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.FullyConnectedDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.FullyConnectedDescriptor} + */ +armnnSerializer.FullyConnectedDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FullyConnectedDescriptor=} obj + * @returns {armnnSerializer.FullyConnectedDescriptor} + */ +armnnSerializer.FullyConnectedDescriptor.getRootAsFullyConnectedDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.FullyConnectedDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FullyConnectedDescriptor=} obj + * @returns {armnnSerializer.FullyConnectedDescriptor} + */ +armnnSerializer.FullyConnectedDescriptor.getSizePrefixedRootAsFullyConnectedDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.FullyConnectedDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * 
@returns {boolean} + */ +armnnSerializer.FullyConnectedDescriptor.prototype.biasEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.FullyConnectedDescriptor.prototype.transposeWeightsMatrix = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.FullyConnectedDescriptor.startFullyConnectedDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} biasEnabled + */ +armnnSerializer.FullyConnectedDescriptor.addBiasEnabled = function(builder, biasEnabled) { + builder.addFieldInt8(0, +biasEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} transposeWeightsMatrix + */ +armnnSerializer.FullyConnectedDescriptor.addTransposeWeightsMatrix = function(builder, transposeWeightsMatrix) { + builder.addFieldInt8(1, +transposeWeightsMatrix, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FullyConnectedDescriptor.endFullyConnectedDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} biasEnabled + * @param {boolean} transposeWeightsMatrix + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FullyConnectedDescriptor.createFullyConnectedDescriptor = function(builder, biasEnabled, transposeWeightsMatrix) { + armnnSerializer.FullyConnectedDescriptor.startFullyConnectedDescriptor(builder); + armnnSerializer.FullyConnectedDescriptor.addBiasEnabled(builder, biasEnabled); + armnnSerializer.FullyConnectedDescriptor.addTransposeWeightsMatrix(builder, transposeWeightsMatrix); + return 
armnnSerializer.FullyConnectedDescriptor.endFullyConnectedDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.GatherLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.GatherLayer} + */ +armnnSerializer.GatherLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.GatherLayer=} obj + * @returns {armnnSerializer.GatherLayer} + */ +armnnSerializer.GatherLayer.getRootAsGatherLayer = function(bb, obj) { + return (obj || new armnnSerializer.GatherLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.GatherLayer=} obj + * @returns {armnnSerializer.GatherLayer} + */ +armnnSerializer.GatherLayer.getSizePrefixedRootAsGatherLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.GatherLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.GatherLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.GatherLayer.startGatherLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.GatherLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.GatherLayer.endGatherLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.GatherLayer.createGatherLayer = function(builder, baseOffset) { + armnnSerializer.GatherLayer.startGatherLayer(builder); + armnnSerializer.GatherLayer.addBase(builder, baseOffset); + return armnnSerializer.GatherLayer.endGatherLayer(builder); +} + +/** + * @deprecated Use ComparisonLayer instead + * + * @constructor + */ +armnnSerializer.GreaterLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.GreaterLayer} + */ +armnnSerializer.GreaterLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.GreaterLayer=} obj + * @returns {armnnSerializer.GreaterLayer} + */ +armnnSerializer.GreaterLayer.getRootAsGreaterLayer = function(bb, obj) { + return (obj || new armnnSerializer.GreaterLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.GreaterLayer=} obj + * @returns 
{armnnSerializer.GreaterLayer} + */ +armnnSerializer.GreaterLayer.getSizePrefixedRootAsGreaterLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.GreaterLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.GreaterLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.GreaterLayer.startGreaterLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.GreaterLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.GreaterLayer.endGreaterLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.GreaterLayer.createGreaterLayer = function(builder, baseOffset) { + armnnSerializer.GreaterLayer.startGreaterLayer(builder); + armnnSerializer.GreaterLayer.addBase(builder, baseOffset); + return armnnSerializer.GreaterLayer.endGreaterLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.InputLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.InputLayer} + */ +armnnSerializer.InputLayer.prototype.__init = function(i, bb) { + 
this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.InputLayer=} obj + * @returns {armnnSerializer.InputLayer} + */ +armnnSerializer.InputLayer.getRootAsInputLayer = function(bb, obj) { + return (obj || new armnnSerializer.InputLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.InputLayer=} obj + * @returns {armnnSerializer.InputLayer} + */ +armnnSerializer.InputLayer.getSizePrefixedRootAsInputLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.InputLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.BindableLayerBase=} obj + * @returns {armnnSerializer.BindableLayerBase|null} + */ +armnnSerializer.InputLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.BindableLayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.InputLayer.startInputLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.InputLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.InputLayer.endInputLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.InputLayer.createInputLayer = function(builder, baseOffset) { + armnnSerializer.InputLayer.startInputLayer(builder); + armnnSerializer.InputLayer.addBase(builder, baseOffset); + return armnnSerializer.InputLayer.endInputLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.InstanceNormalizationLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.InstanceNormalizationLayer} + */ +armnnSerializer.InstanceNormalizationLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.InstanceNormalizationLayer=} obj + * @returns {armnnSerializer.InstanceNormalizationLayer} + */ +armnnSerializer.InstanceNormalizationLayer.getRootAsInstanceNormalizationLayer = function(bb, obj) { + return (obj || new armnnSerializer.InstanceNormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * 
@param {armnnSerializer.InstanceNormalizationLayer=} obj + * @returns {armnnSerializer.InstanceNormalizationLayer} + */ +armnnSerializer.InstanceNormalizationLayer.getSizePrefixedRootAsInstanceNormalizationLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.InstanceNormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.InstanceNormalizationLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.InstanceNormalizationDescriptor=} obj + * @returns {armnnSerializer.InstanceNormalizationDescriptor|null} + */ +armnnSerializer.InstanceNormalizationLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.InstanceNormalizationDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.InstanceNormalizationLayer.startInstanceNormalizationLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.InstanceNormalizationLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.InstanceNormalizationLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.InstanceNormalizationLayer.endInstanceNormalizationLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.InstanceNormalizationLayer.createInstanceNormalizationLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.InstanceNormalizationLayer.startInstanceNormalizationLayer(builder); + armnnSerializer.InstanceNormalizationLayer.addBase(builder, baseOffset); + armnnSerializer.InstanceNormalizationLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.InstanceNormalizationLayer.endInstanceNormalizationLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.InstanceNormalizationDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns 
{armnnSerializer.InstanceNormalizationDescriptor} + */ +armnnSerializer.InstanceNormalizationDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.InstanceNormalizationDescriptor=} obj + * @returns {armnnSerializer.InstanceNormalizationDescriptor} + */ +armnnSerializer.InstanceNormalizationDescriptor.getRootAsInstanceNormalizationDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.InstanceNormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.InstanceNormalizationDescriptor=} obj + * @returns {armnnSerializer.InstanceNormalizationDescriptor} + */ +armnnSerializer.InstanceNormalizationDescriptor.getSizePrefixedRootAsInstanceNormalizationDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.InstanceNormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.InstanceNormalizationDescriptor.prototype.gamma = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.InstanceNormalizationDescriptor.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.InstanceNormalizationDescriptor.prototype.eps = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.InstanceNormalizationDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? /** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.InstanceNormalizationDescriptor.startInstanceNormalizationDescriptor = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} gamma + */ +armnnSerializer.InstanceNormalizationDescriptor.addGamma = function(builder, gamma) { + builder.addFieldFloat32(0, gamma, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +armnnSerializer.InstanceNormalizationDescriptor.addBeta = function(builder, beta) { + builder.addFieldFloat32(1, beta, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} eps + */ +armnnSerializer.InstanceNormalizationDescriptor.addEps = function(builder, eps) { + builder.addFieldFloat32(2, eps, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.InstanceNormalizationDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(3, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.InstanceNormalizationDescriptor.endInstanceNormalizationDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} gamma + * @param {number} beta + * @param {number} eps + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ 
+armnnSerializer.InstanceNormalizationDescriptor.createInstanceNormalizationDescriptor = function(builder, gamma, beta, eps, dataLayout) { + armnnSerializer.InstanceNormalizationDescriptor.startInstanceNormalizationDescriptor(builder); + armnnSerializer.InstanceNormalizationDescriptor.addGamma(builder, gamma); + armnnSerializer.InstanceNormalizationDescriptor.addBeta(builder, beta); + armnnSerializer.InstanceNormalizationDescriptor.addEps(builder, eps); + armnnSerializer.InstanceNormalizationDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.InstanceNormalizationDescriptor.endInstanceNormalizationDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.LogSoftmaxLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.LogSoftmaxLayer} + */ +armnnSerializer.LogSoftmaxLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LogSoftmaxLayer=} obj + * @returns {armnnSerializer.LogSoftmaxLayer} + */ +armnnSerializer.LogSoftmaxLayer.getRootAsLogSoftmaxLayer = function(bb, obj) { + return (obj || new armnnSerializer.LogSoftmaxLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LogSoftmaxLayer=} obj + * @returns {armnnSerializer.LogSoftmaxLayer} + */ +armnnSerializer.LogSoftmaxLayer.getSizePrefixedRootAsLogSoftmaxLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.LogSoftmaxLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ 
+armnnSerializer.LogSoftmaxLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.LogSoftmaxDescriptor=} obj + * @returns {armnnSerializer.LogSoftmaxDescriptor|null} + */ +armnnSerializer.LogSoftmaxLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.LogSoftmaxDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.LogSoftmaxLayer.startLogSoftmaxLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.LogSoftmaxLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.LogSoftmaxLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LogSoftmaxLayer.endLogSoftmaxLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LogSoftmaxLayer.createLogSoftmaxLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.LogSoftmaxLayer.startLogSoftmaxLayer(builder); + armnnSerializer.LogSoftmaxLayer.addBase(builder, baseOffset); + armnnSerializer.LogSoftmaxLayer.addDescriptor(builder, descriptorOffset); + return 
armnnSerializer.LogSoftmaxLayer.endLogSoftmaxLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.LogSoftmaxDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.LogSoftmaxDescriptor} + */ +armnnSerializer.LogSoftmaxDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LogSoftmaxDescriptor=} obj + * @returns {armnnSerializer.LogSoftmaxDescriptor} + */ +armnnSerializer.LogSoftmaxDescriptor.getRootAsLogSoftmaxDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.LogSoftmaxDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LogSoftmaxDescriptor=} obj + * @returns {armnnSerializer.LogSoftmaxDescriptor} + */ +armnnSerializer.LogSoftmaxDescriptor.getSizePrefixedRootAsLogSoftmaxDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.LogSoftmaxDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.LogSoftmaxDescriptor.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 1.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.LogSoftmaxDescriptor.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : -1; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.LogSoftmaxDescriptor.startLogSoftmaxDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +armnnSerializer.LogSoftmaxDescriptor.addBeta = function(builder, beta) { + builder.addFieldFloat32(0, beta, 1.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +armnnSerializer.LogSoftmaxDescriptor.addAxis = function(builder, axis) { + builder.addFieldInt32(1, axis, -1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LogSoftmaxDescriptor.endLogSoftmaxDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LogSoftmaxDescriptor.createLogSoftmaxDescriptor = function(builder, beta, axis) { + armnnSerializer.LogSoftmaxDescriptor.startLogSoftmaxDescriptor(builder); + armnnSerializer.LogSoftmaxDescriptor.addBeta(builder, beta); + armnnSerializer.LogSoftmaxDescriptor.addAxis(builder, axis); + return armnnSerializer.LogSoftmaxDescriptor.endLogSoftmaxDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.L2NormalizationLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.L2NormalizationLayer} + */ +armnnSerializer.L2NormalizationLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.L2NormalizationLayer=} obj + * @returns {armnnSerializer.L2NormalizationLayer} + */ 
+armnnSerializer.L2NormalizationLayer.getRootAsL2NormalizationLayer = function(bb, obj) { + return (obj || new armnnSerializer.L2NormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.L2NormalizationLayer=} obj + * @returns {armnnSerializer.L2NormalizationLayer} + */ +armnnSerializer.L2NormalizationLayer.getSizePrefixedRootAsL2NormalizationLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.L2NormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.L2NormalizationLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.L2NormalizationDescriptor=} obj + * @returns {armnnSerializer.L2NormalizationDescriptor|null} + */ +armnnSerializer.L2NormalizationLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.L2NormalizationDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.L2NormalizationLayer.startL2NormalizationLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.L2NormalizationLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.L2NormalizationLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.L2NormalizationLayer.endL2NormalizationLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.L2NormalizationLayer.createL2NormalizationLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.L2NormalizationLayer.startL2NormalizationLayer(builder); + armnnSerializer.L2NormalizationLayer.addBase(builder, baseOffset); + armnnSerializer.L2NormalizationLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.L2NormalizationLayer.endL2NormalizationLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.L2NormalizationDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.L2NormalizationDescriptor} + */ +armnnSerializer.L2NormalizationDescriptor.prototype.__init = 
function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.L2NormalizationDescriptor=} obj + * @returns {armnnSerializer.L2NormalizationDescriptor} + */ +armnnSerializer.L2NormalizationDescriptor.getRootAsL2NormalizationDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.L2NormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.L2NormalizationDescriptor=} obj + * @returns {armnnSerializer.L2NormalizationDescriptor} + */ +armnnSerializer.L2NormalizationDescriptor.getSizePrefixedRootAsL2NormalizationDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.L2NormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.L2NormalizationDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NCHW; +}; + +/** + * @returns {number} + */ +armnnSerializer.L2NormalizationDescriptor.prototype.eps = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 1e-12; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.L2NormalizationDescriptor.startL2NormalizationDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.L2NormalizationDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(0, dataLayout, armnnSerializer.DataLayout.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} eps + */ +armnnSerializer.L2NormalizationDescriptor.addEps = function(builder, eps) { + builder.addFieldFloat32(1, eps, 1e-12); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.L2NormalizationDescriptor.endL2NormalizationDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + * @param {number} eps + * @returns {flatbuffers.Offset} + */ +armnnSerializer.L2NormalizationDescriptor.createL2NormalizationDescriptor = function(builder, dataLayout, eps) { + armnnSerializer.L2NormalizationDescriptor.startL2NormalizationDescriptor(builder); + armnnSerializer.L2NormalizationDescriptor.addDataLayout(builder, dataLayout); + armnnSerializer.L2NormalizationDescriptor.addEps(builder, eps); + return armnnSerializer.L2NormalizationDescriptor.endL2NormalizationDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.MinimumLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.MinimumLayer} + */ +armnnSerializer.MinimumLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MinimumLayer=} obj + * @returns {armnnSerializer.MinimumLayer} + */ +armnnSerializer.MinimumLayer.getRootAsMinimumLayer = function(bb, obj) { + return (obj || new armnnSerializer.MinimumLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MinimumLayer=} obj + * @returns {armnnSerializer.MinimumLayer} + */ +armnnSerializer.MinimumLayer.getSizePrefixedRootAsMinimumLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.MinimumLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.MinimumLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.MinimumLayer.startMinimumLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.MinimumLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MinimumLayer.endMinimumLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MinimumLayer.createMinimumLayer = function(builder, baseOffset) { + armnnSerializer.MinimumLayer.startMinimumLayer(builder); + armnnSerializer.MinimumLayer.addBase(builder, baseOffset); + return 
armnnSerializer.MinimumLayer.endMinimumLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.MaximumLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.MaximumLayer} + */ +armnnSerializer.MaximumLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MaximumLayer=} obj + * @returns {armnnSerializer.MaximumLayer} + */ +armnnSerializer.MaximumLayer.getRootAsMaximumLayer = function(bb, obj) { + return (obj || new armnnSerializer.MaximumLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MaximumLayer=} obj + * @returns {armnnSerializer.MaximumLayer} + */ +armnnSerializer.MaximumLayer.getSizePrefixedRootAsMaximumLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.MaximumLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.MaximumLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.MaximumLayer.startMaximumLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.MaximumLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MaximumLayer.endMaximumLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MaximumLayer.createMaximumLayer = function(builder, baseOffset) { + armnnSerializer.MaximumLayer.startMaximumLayer(builder); + armnnSerializer.MaximumLayer.addBase(builder, baseOffset); + return armnnSerializer.MaximumLayer.endMaximumLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.MultiplicationLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.MultiplicationLayer} + */ +armnnSerializer.MultiplicationLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MultiplicationLayer=} obj + * @returns {armnnSerializer.MultiplicationLayer} + */ +armnnSerializer.MultiplicationLayer.getRootAsMultiplicationLayer = function(bb, obj) { + return (obj || new armnnSerializer.MultiplicationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{armnnSerializer.MultiplicationLayer=} obj + * @returns {armnnSerializer.MultiplicationLayer} + */ +armnnSerializer.MultiplicationLayer.getSizePrefixedRootAsMultiplicationLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.MultiplicationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.MultiplicationLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.MultiplicationLayer.startMultiplicationLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.MultiplicationLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MultiplicationLayer.endMultiplicationLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MultiplicationLayer.createMultiplicationLayer = function(builder, baseOffset) { + armnnSerializer.MultiplicationLayer.startMultiplicationLayer(builder); + armnnSerializer.MultiplicationLayer.addBase(builder, baseOffset); + return armnnSerializer.MultiplicationLayer.endMultiplicationLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.Pooling2dLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** 
+ * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.Pooling2dLayer} + */ +armnnSerializer.Pooling2dLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Pooling2dLayer=} obj + * @returns {armnnSerializer.Pooling2dLayer} + */ +armnnSerializer.Pooling2dLayer.getRootAsPooling2dLayer = function(bb, obj) { + return (obj || new armnnSerializer.Pooling2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Pooling2dLayer=} obj + * @returns {armnnSerializer.Pooling2dLayer} + */ +armnnSerializer.Pooling2dLayer.getSizePrefixedRootAsPooling2dLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.Pooling2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.Pooling2dLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.Pooling2dDescriptor=} obj + * @returns {armnnSerializer.Pooling2dDescriptor|null} + */ +armnnSerializer.Pooling2dLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.Pooling2dDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.Pooling2dLayer.startPooling2dLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.Pooling2dLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.Pooling2dLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Pooling2dLayer.endPooling2dLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Pooling2dLayer.createPooling2dLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.Pooling2dLayer.startPooling2dLayer(builder); + armnnSerializer.Pooling2dLayer.addBase(builder, baseOffset); + armnnSerializer.Pooling2dLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.Pooling2dLayer.endPooling2dLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.Pooling2dDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.Pooling2dDescriptor} + */ +armnnSerializer.Pooling2dDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Pooling2dDescriptor=} obj + * @returns {armnnSerializer.Pooling2dDescriptor} + */ +armnnSerializer.Pooling2dDescriptor.getRootAsPooling2dDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.Pooling2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.Pooling2dDescriptor=} obj + * @returns {armnnSerializer.Pooling2dDescriptor} + */ +armnnSerializer.Pooling2dDescriptor.getSizePrefixedRootAsPooling2dDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.Pooling2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.PoolingAlgorithm} + */ +armnnSerializer.Pooling2dDescriptor.prototype.poolType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {armnnSerializer.PoolingAlgorithm} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.PoolingAlgorithm.Max; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.padLeft = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.padRight = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.padTop = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.padBottom = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.poolWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.poolHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.Pooling2dDescriptor.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {armnnSerializer.OutputShapeRounding} + */ +armnnSerializer.Pooling2dDescriptor.prototype.outputShapeRounding = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? /** @type {armnnSerializer.OutputShapeRounding} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.OutputShapeRounding.Floor; +}; + +/** + * @returns {armnnSerializer.PaddingMethod} + */ +armnnSerializer.Pooling2dDescriptor.prototype.paddingMethod = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? /** @type {armnnSerializer.PaddingMethod} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.PaddingMethod.IgnoreValue; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.Pooling2dDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.Pooling2dDescriptor.startPooling2dDescriptor = function(builder) { + builder.startObject(12); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.PoolingAlgorithm} poolType + */ +armnnSerializer.Pooling2dDescriptor.addPoolType = function(builder, poolType) { + builder.addFieldInt8(0, poolType, armnnSerializer.PoolingAlgorithm.Max); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padLeft + */ +armnnSerializer.Pooling2dDescriptor.addPadLeft = function(builder, padLeft) { + builder.addFieldInt32(1, padLeft, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padRight + */ +armnnSerializer.Pooling2dDescriptor.addPadRight = function(builder, padRight) { + builder.addFieldInt32(2, padRight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padTop + */ +armnnSerializer.Pooling2dDescriptor.addPadTop = function(builder, padTop) { + builder.addFieldInt32(3, padTop, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padBottom + */ +armnnSerializer.Pooling2dDescriptor.addPadBottom = function(builder, padBottom) { + builder.addFieldInt32(4, padBottom, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} poolWidth + */ +armnnSerializer.Pooling2dDescriptor.addPoolWidth = function(builder, poolWidth) { + builder.addFieldInt32(5, poolWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} poolHeight + */ +armnnSerializer.Pooling2dDescriptor.addPoolHeight = function(builder, poolHeight) { + builder.addFieldInt32(6, poolHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +armnnSerializer.Pooling2dDescriptor.addStrideX = function(builder, strideX) { + builder.addFieldInt32(7, 
strideX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +armnnSerializer.Pooling2dDescriptor.addStrideY = function(builder, strideY) { + builder.addFieldInt32(8, strideY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.OutputShapeRounding} outputShapeRounding + */ +armnnSerializer.Pooling2dDescriptor.addOutputShapeRounding = function(builder, outputShapeRounding) { + builder.addFieldInt8(9, outputShapeRounding, armnnSerializer.OutputShapeRounding.Floor); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.PaddingMethod} paddingMethod + */ +armnnSerializer.Pooling2dDescriptor.addPaddingMethod = function(builder, paddingMethod) { + builder.addFieldInt8(10, paddingMethod, armnnSerializer.PaddingMethod.IgnoreValue); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.Pooling2dDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(11, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Pooling2dDescriptor.endPooling2dDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.PoolingAlgorithm} poolType + * @param {number} padLeft + * @param {number} padRight + * @param {number} padTop + * @param {number} padBottom + * @param {number} poolWidth + * @param {number} poolHeight + * @param {number} strideX + * @param {number} strideY + * @param {armnnSerializer.OutputShapeRounding} outputShapeRounding + * @param {armnnSerializer.PaddingMethod} paddingMethod + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.Pooling2dDescriptor.createPooling2dDescriptor = function(builder, poolType, padLeft, padRight, padTop, 
padBottom, poolWidth, poolHeight, strideX, strideY, outputShapeRounding, paddingMethod, dataLayout) { + armnnSerializer.Pooling2dDescriptor.startPooling2dDescriptor(builder); + armnnSerializer.Pooling2dDescriptor.addPoolType(builder, poolType); + armnnSerializer.Pooling2dDescriptor.addPadLeft(builder, padLeft); + armnnSerializer.Pooling2dDescriptor.addPadRight(builder, padRight); + armnnSerializer.Pooling2dDescriptor.addPadTop(builder, padTop); + armnnSerializer.Pooling2dDescriptor.addPadBottom(builder, padBottom); + armnnSerializer.Pooling2dDescriptor.addPoolWidth(builder, poolWidth); + armnnSerializer.Pooling2dDescriptor.addPoolHeight(builder, poolHeight); + armnnSerializer.Pooling2dDescriptor.addStrideX(builder, strideX); + armnnSerializer.Pooling2dDescriptor.addStrideY(builder, strideY); + armnnSerializer.Pooling2dDescriptor.addOutputShapeRounding(builder, outputShapeRounding); + armnnSerializer.Pooling2dDescriptor.addPaddingMethod(builder, paddingMethod); + armnnSerializer.Pooling2dDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.Pooling2dDescriptor.endPooling2dDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.QuantizeLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.QuantizeLayer} + */ +armnnSerializer.QuantizeLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QuantizeLayer=} obj + * @returns {armnnSerializer.QuantizeLayer} + */ +armnnSerializer.QuantizeLayer.getRootAsQuantizeLayer = function(bb, obj) { + return (obj || new armnnSerializer.QuantizeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QuantizeLayer=} obj + * 
@returns {armnnSerializer.QuantizeLayer} + */ +armnnSerializer.QuantizeLayer.getSizePrefixedRootAsQuantizeLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.QuantizeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.QuantizeLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.QuantizeLayer.startQuantizeLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.QuantizeLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QuantizeLayer.endQuantizeLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QuantizeLayer.createQuantizeLayer = function(builder, baseOffset) { + armnnSerializer.QuantizeLayer.startQuantizeLayer(builder); + armnnSerializer.QuantizeLayer.addBase(builder, baseOffset); + return armnnSerializer.QuantizeLayer.endQuantizeLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.SoftmaxLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SoftmaxLayer} + */ 
+armnnSerializer.SoftmaxLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SoftmaxLayer=} obj + * @returns {armnnSerializer.SoftmaxLayer} + */ +armnnSerializer.SoftmaxLayer.getRootAsSoftmaxLayer = function(bb, obj) { + return (obj || new armnnSerializer.SoftmaxLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SoftmaxLayer=} obj + * @returns {armnnSerializer.SoftmaxLayer} + */ +armnnSerializer.SoftmaxLayer.getSizePrefixedRootAsSoftmaxLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SoftmaxLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.SoftmaxLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.SoftmaxDescriptor=} obj + * @returns {armnnSerializer.SoftmaxDescriptor|null} + */ +armnnSerializer.SoftmaxLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.SoftmaxDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SoftmaxLayer.startSoftmaxLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.SoftmaxLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.SoftmaxLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SoftmaxLayer.endSoftmaxLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SoftmaxLayer.createSoftmaxLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.SoftmaxLayer.startSoftmaxLayer(builder); + armnnSerializer.SoftmaxLayer.addBase(builder, baseOffset); + armnnSerializer.SoftmaxLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.SoftmaxLayer.endSoftmaxLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.SoftmaxDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SoftmaxDescriptor} + */ +armnnSerializer.SoftmaxDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{armnnSerializer.SoftmaxDescriptor=} obj + * @returns {armnnSerializer.SoftmaxDescriptor} + */ +armnnSerializer.SoftmaxDescriptor.getRootAsSoftmaxDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.SoftmaxDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SoftmaxDescriptor=} obj + * @returns {armnnSerializer.SoftmaxDescriptor} + */ +armnnSerializer.SoftmaxDescriptor.getSizePrefixedRootAsSoftmaxDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SoftmaxDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.SoftmaxDescriptor.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SoftmaxDescriptor.startSoftmaxDescriptor = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +armnnSerializer.SoftmaxDescriptor.addBeta = function(builder, beta) { + builder.addFieldFloat32(0, beta, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SoftmaxDescriptor.endSoftmaxDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SoftmaxDescriptor.createSoftmaxDescriptor = function(builder, beta) { + armnnSerializer.SoftmaxDescriptor.startSoftmaxDescriptor(builder); + armnnSerializer.SoftmaxDescriptor.addBeta(builder, beta); + return armnnSerializer.SoftmaxDescriptor.endSoftmaxDescriptor(builder); +} + +/** + * @constructor + */ 
+armnnSerializer.DepthwiseConvolution2dLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DepthwiseConvolution2dLayer} + */ +armnnSerializer.DepthwiseConvolution2dLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthwiseConvolution2dLayer=} obj + * @returns {armnnSerializer.DepthwiseConvolution2dLayer} + */ +armnnSerializer.DepthwiseConvolution2dLayer.getRootAsDepthwiseConvolution2dLayer = function(bb, obj) { + return (obj || new armnnSerializer.DepthwiseConvolution2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthwiseConvolution2dLayer=} obj + * @returns {armnnSerializer.DepthwiseConvolution2dLayer} + */ +armnnSerializer.DepthwiseConvolution2dLayer.getSizePrefixedRootAsDepthwiseConvolution2dLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DepthwiseConvolution2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.DepthwiseConvolution2dLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.DepthwiseConvolution2dDescriptor=} obj + * @returns {armnnSerializer.DepthwiseConvolution2dDescriptor|null} + */ +armnnSerializer.DepthwiseConvolution2dLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.DepthwiseConvolution2dDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.DepthwiseConvolution2dLayer.prototype.weights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.DepthwiseConvolution2dLayer.prototype.biases = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DepthwiseConvolution2dLayer.startDepthwiseConvolution2dLayer = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.DepthwiseConvolution2dLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.DepthwiseConvolution2dLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightsOffset + */ +armnnSerializer.DepthwiseConvolution2dLayer.addWeights = function(builder, weightsOffset) { + builder.addFieldOffset(2, weightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasesOffset + */ +armnnSerializer.DepthwiseConvolution2dLayer.addBiases = function(builder, biasesOffset) { + builder.addFieldOffset(3, 
biasesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthwiseConvolution2dLayer.endDepthwiseConvolution2dLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} weightsOffset + * @param {flatbuffers.Offset} biasesOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthwiseConvolution2dLayer.createDepthwiseConvolution2dLayer = function(builder, baseOffset, descriptorOffset, weightsOffset, biasesOffset) { + armnnSerializer.DepthwiseConvolution2dLayer.startDepthwiseConvolution2dLayer(builder); + armnnSerializer.DepthwiseConvolution2dLayer.addBase(builder, baseOffset); + armnnSerializer.DepthwiseConvolution2dLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.DepthwiseConvolution2dLayer.addWeights(builder, weightsOffset); + armnnSerializer.DepthwiseConvolution2dLayer.addBiases(builder, biasesOffset); + return armnnSerializer.DepthwiseConvolution2dLayer.endDepthwiseConvolution2dLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.DepthwiseConvolution2dDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DepthwiseConvolution2dDescriptor} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthwiseConvolution2dDescriptor=} obj + * @returns {armnnSerializer.DepthwiseConvolution2dDescriptor} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.getRootAsDepthwiseConvolution2dDescriptor = function(bb, obj) { + 
return (obj || new armnnSerializer.DepthwiseConvolution2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DepthwiseConvolution2dDescriptor=} obj + * @returns {armnnSerializer.DepthwiseConvolution2dDescriptor} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.getSizePrefixedRootAsDepthwiseConvolution2dDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DepthwiseConvolution2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.padLeft = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.padRight = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.padTop = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.padBottom = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.dilationX = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.dilationY = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.biasEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? /** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NCHW; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.startDepthwiseConvolution2dDescriptor = function(builder) { + builder.startObject(10); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padLeft + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addPadLeft = function(builder, padLeft) { + builder.addFieldInt32(0, padLeft, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padRight + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addPadRight = function(builder, padRight) { + builder.addFieldInt32(1, padRight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padTop + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addPadTop = function(builder, padTop) { + builder.addFieldInt32(2, padTop, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * 
@param {number} padBottom + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addPadBottom = function(builder, padBottom) { + builder.addFieldInt32(3, padBottom, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addStrideX = function(builder, strideX) { + builder.addFieldInt32(4, strideX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addStrideY = function(builder, strideY) { + builder.addFieldInt32(5, strideY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationX + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addDilationX = function(builder, dilationX) { + builder.addFieldInt32(6, dilationX, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationY + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addDilationY = function(builder, dilationY) { + builder.addFieldInt32(7, dilationY, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} biasEnabled + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addBiasEnabled = function(builder, biasEnabled) { + builder.addFieldInt8(8, +biasEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(9, dataLayout, armnnSerializer.DataLayout.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.endDepthwiseConvolution2dDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padLeft + * @param {number} padRight + * @param {number} padTop + * @param {number} padBottom + * @param 
{number} strideX + * @param {number} strideY + * @param {number} dilationX + * @param {number} dilationY + * @param {boolean} biasEnabled + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DepthwiseConvolution2dDescriptor.createDepthwiseConvolution2dDescriptor = function(builder, padLeft, padRight, padTop, padBottom, strideX, strideY, dilationX, dilationY, biasEnabled, dataLayout) { + armnnSerializer.DepthwiseConvolution2dDescriptor.startDepthwiseConvolution2dDescriptor(builder); + armnnSerializer.DepthwiseConvolution2dDescriptor.addPadLeft(builder, padLeft); + armnnSerializer.DepthwiseConvolution2dDescriptor.addPadRight(builder, padRight); + armnnSerializer.DepthwiseConvolution2dDescriptor.addPadTop(builder, padTop); + armnnSerializer.DepthwiseConvolution2dDescriptor.addPadBottom(builder, padBottom); + armnnSerializer.DepthwiseConvolution2dDescriptor.addStrideX(builder, strideX); + armnnSerializer.DepthwiseConvolution2dDescriptor.addStrideY(builder, strideY); + armnnSerializer.DepthwiseConvolution2dDescriptor.addDilationX(builder, dilationX); + armnnSerializer.DepthwiseConvolution2dDescriptor.addDilationY(builder, dilationY); + armnnSerializer.DepthwiseConvolution2dDescriptor.addBiasEnabled(builder, biasEnabled); + armnnSerializer.DepthwiseConvolution2dDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.DepthwiseConvolution2dDescriptor.endDepthwiseConvolution2dDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.OutputLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.OutputLayer} + */ +armnnSerializer.OutputLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.OutputLayer=} 
obj + * @returns {armnnSerializer.OutputLayer} + */ +armnnSerializer.OutputLayer.getRootAsOutputLayer = function(bb, obj) { + return (obj || new armnnSerializer.OutputLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.OutputLayer=} obj + * @returns {armnnSerializer.OutputLayer} + */ +armnnSerializer.OutputLayer.getSizePrefixedRootAsOutputLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.OutputLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.BindableLayerBase=} obj + * @returns {armnnSerializer.BindableLayerBase|null} + */ +armnnSerializer.OutputLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.BindableLayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.OutputLayer.startOutputLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.OutputLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.OutputLayer.endOutputLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.OutputLayer.createOutputLayer = function(builder, baseOffset) { + armnnSerializer.OutputLayer.startOutputLayer(builder); + armnnSerializer.OutputLayer.addBase(builder, baseOffset); + return armnnSerializer.OutputLayer.endOutputLayer(builder); +} + +/** + * @constructor + */ 
+armnnSerializer.ReshapeLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ReshapeLayer} + */ +armnnSerializer.ReshapeLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ReshapeLayer=} obj + * @returns {armnnSerializer.ReshapeLayer} + */ +armnnSerializer.ReshapeLayer.getRootAsReshapeLayer = function(bb, obj) { + return (obj || new armnnSerializer.ReshapeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ReshapeLayer=} obj + * @returns {armnnSerializer.ReshapeLayer} + */ +armnnSerializer.ReshapeLayer.getSizePrefixedRootAsReshapeLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ReshapeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ReshapeLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ReshapeDescriptor=} obj + * @returns {armnnSerializer.ReshapeDescriptor|null} + */ +armnnSerializer.ReshapeLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ReshapeDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ReshapeLayer.startReshapeLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ReshapeLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ReshapeLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ReshapeLayer.endReshapeLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ReshapeLayer.createReshapeLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ReshapeLayer.startReshapeLayer(builder); + armnnSerializer.ReshapeLayer.addBase(builder, baseOffset); + armnnSerializer.ReshapeLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ReshapeLayer.endReshapeLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ReshapeDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ReshapeDescriptor} + */ +armnnSerializer.ReshapeDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{armnnSerializer.ReshapeDescriptor=} obj + * @returns {armnnSerializer.ReshapeDescriptor} + */ +armnnSerializer.ReshapeDescriptor.getRootAsReshapeDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ReshapeDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ReshapeDescriptor=} obj + * @returns {armnnSerializer.ReshapeDescriptor} + */ +armnnSerializer.ReshapeDescriptor.getSizePrefixedRootAsReshapeDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ReshapeDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.ReshapeDescriptor.prototype.targetShape = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.ReshapeDescriptor.prototype.targetShapeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.ReshapeDescriptor.prototype.targetShapeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ReshapeDescriptor.startReshapeDescriptor = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} targetShapeOffset + */ +armnnSerializer.ReshapeDescriptor.addTargetShape = function(builder, targetShapeOffset) { + builder.addFieldOffset(0, targetShapeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ReshapeDescriptor.createTargetShapeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.ReshapeDescriptor.startTargetShapeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ReshapeDescriptor.endReshapeDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} targetShapeOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ReshapeDescriptor.createReshapeDescriptor = function(builder, targetShapeOffset) { + armnnSerializer.ReshapeDescriptor.startReshapeDescriptor(builder); + armnnSerializer.ReshapeDescriptor.addTargetShape(builder, targetShapeOffset); + return armnnSerializer.ReshapeDescriptor.endReshapeDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.PermuteLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + 
*/ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.PermuteLayer} + */ +armnnSerializer.PermuteLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PermuteLayer=} obj + * @returns {armnnSerializer.PermuteLayer} + */ +armnnSerializer.PermuteLayer.getRootAsPermuteLayer = function(bb, obj) { + return (obj || new armnnSerializer.PermuteLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PermuteLayer=} obj + * @returns {armnnSerializer.PermuteLayer} + */ +armnnSerializer.PermuteLayer.getSizePrefixedRootAsPermuteLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.PermuteLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.PermuteLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.PermuteDescriptor=} obj + * @returns {armnnSerializer.PermuteDescriptor|null} + */ +armnnSerializer.PermuteLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.PermuteDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.PermuteLayer.startPermuteLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.PermuteLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.PermuteLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PermuteLayer.endPermuteLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PermuteLayer.createPermuteLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.PermuteLayer.startPermuteLayer(builder); + armnnSerializer.PermuteLayer.addBase(builder, baseOffset); + armnnSerializer.PermuteLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.PermuteLayer.endPermuteLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.PermuteDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.PermuteDescriptor} + */ +armnnSerializer.PermuteDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{armnnSerializer.PermuteDescriptor=} obj + * @returns {armnnSerializer.PermuteDescriptor} + */ +armnnSerializer.PermuteDescriptor.getRootAsPermuteDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.PermuteDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PermuteDescriptor=} obj + * @returns {armnnSerializer.PermuteDescriptor} + */ +armnnSerializer.PermuteDescriptor.getSizePrefixedRootAsPermuteDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.PermuteDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.PermuteDescriptor.prototype.dimMappings = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.PermuteDescriptor.prototype.dimMappingsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.PermuteDescriptor.prototype.dimMappingsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.PermuteDescriptor.startPermuteDescriptor = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimMappingsOffset + */ +armnnSerializer.PermuteDescriptor.addDimMappings = function(builder, dimMappingsOffset) { + builder.addFieldOffset(0, dimMappingsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PermuteDescriptor.createDimMappingsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.PermuteDescriptor.startDimMappingsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PermuteDescriptor.endPermuteDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimMappingsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PermuteDescriptor.createPermuteDescriptor = function(builder, dimMappingsOffset) { + armnnSerializer.PermuteDescriptor.startPermuteDescriptor(builder); + armnnSerializer.PermuteDescriptor.addDimMappings(builder, dimMappingsOffset); + return armnnSerializer.PermuteDescriptor.endPermuteDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.SpaceToBatchNdLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type 
{number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SpaceToBatchNdLayer} + */ +armnnSerializer.SpaceToBatchNdLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToBatchNdLayer=} obj + * @returns {armnnSerializer.SpaceToBatchNdLayer} + */ +armnnSerializer.SpaceToBatchNdLayer.getRootAsSpaceToBatchNdLayer = function(bb, obj) { + return (obj || new armnnSerializer.SpaceToBatchNdLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToBatchNdLayer=} obj + * @returns {armnnSerializer.SpaceToBatchNdLayer} + */ +armnnSerializer.SpaceToBatchNdLayer.getSizePrefixedRootAsSpaceToBatchNdLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SpaceToBatchNdLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.SpaceToBatchNdLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.SpaceToBatchNdDescriptor=} obj + * @returns {armnnSerializer.SpaceToBatchNdDescriptor|null} + */ +armnnSerializer.SpaceToBatchNdLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.SpaceToBatchNdDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SpaceToBatchNdLayer.startSpaceToBatchNdLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.SpaceToBatchNdLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.SpaceToBatchNdLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToBatchNdLayer.endSpaceToBatchNdLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToBatchNdLayer.createSpaceToBatchNdLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.SpaceToBatchNdLayer.startSpaceToBatchNdLayer(builder); + armnnSerializer.SpaceToBatchNdLayer.addBase(builder, baseOffset); + armnnSerializer.SpaceToBatchNdLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.SpaceToBatchNdLayer.endSpaceToBatchNdLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.SpaceToBatchNdDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SpaceToBatchNdDescriptor} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.__init = function(i, bb) { + 
this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToBatchNdDescriptor=} obj + * @returns {armnnSerializer.SpaceToBatchNdDescriptor} + */ +armnnSerializer.SpaceToBatchNdDescriptor.getRootAsSpaceToBatchNdDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.SpaceToBatchNdDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToBatchNdDescriptor=} obj + * @returns {armnnSerializer.SpaceToBatchNdDescriptor} + */ +armnnSerializer.SpaceToBatchNdDescriptor.getSizePrefixedRootAsSpaceToBatchNdDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SpaceToBatchNdDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.blockShape = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.blockShapeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.blockShapeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.padList = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.padListLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.padListArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.SpaceToBatchNdDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SpaceToBatchNdDescriptor.startSpaceToBatchNdDescriptor = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blockShapeOffset + */ +armnnSerializer.SpaceToBatchNdDescriptor.addBlockShape = function(builder, blockShapeOffset) { + builder.addFieldOffset(0, blockShapeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToBatchNdDescriptor.createBlockShapeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.SpaceToBatchNdDescriptor.startBlockShapeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + 
+/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} padListOffset + */ +armnnSerializer.SpaceToBatchNdDescriptor.addPadList = function(builder, padListOffset) { + builder.addFieldOffset(1, padListOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToBatchNdDescriptor.createPadListVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.SpaceToBatchNdDescriptor.startPadListVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.SpaceToBatchNdDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(2, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToBatchNdDescriptor.endSpaceToBatchNdDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blockShapeOffset + * @param {flatbuffers.Offset} padListOffset + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToBatchNdDescriptor.createSpaceToBatchNdDescriptor = function(builder, blockShapeOffset, padListOffset, dataLayout) { + armnnSerializer.SpaceToBatchNdDescriptor.startSpaceToBatchNdDescriptor(builder); + armnnSerializer.SpaceToBatchNdDescriptor.addBlockShape(builder, blockShapeOffset); + armnnSerializer.SpaceToBatchNdDescriptor.addPadList(builder, padListOffset); + 
armnnSerializer.SpaceToBatchNdDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.SpaceToBatchNdDescriptor.endSpaceToBatchNdDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.SpaceToDepthLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SpaceToDepthLayer} + */ +armnnSerializer.SpaceToDepthLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToDepthLayer=} obj + * @returns {armnnSerializer.SpaceToDepthLayer} + */ +armnnSerializer.SpaceToDepthLayer.getRootAsSpaceToDepthLayer = function(bb, obj) { + return (obj || new armnnSerializer.SpaceToDepthLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToDepthLayer=} obj + * @returns {armnnSerializer.SpaceToDepthLayer} + */ +armnnSerializer.SpaceToDepthLayer.getSizePrefixedRootAsSpaceToDepthLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SpaceToDepthLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.SpaceToDepthLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.SpaceToDepthDescriptor=} obj + * @returns {armnnSerializer.SpaceToDepthDescriptor|null} + */ +armnnSerializer.SpaceToDepthLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.SpaceToDepthDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SpaceToDepthLayer.startSpaceToDepthLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.SpaceToDepthLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.SpaceToDepthLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToDepthLayer.endSpaceToDepthLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToDepthLayer.createSpaceToDepthLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.SpaceToDepthLayer.startSpaceToDepthLayer(builder); + armnnSerializer.SpaceToDepthLayer.addBase(builder, baseOffset); + armnnSerializer.SpaceToDepthLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.SpaceToDepthLayer.endSpaceToDepthLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.SpaceToDepthDescriptor 
= function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SpaceToDepthDescriptor} + */ +armnnSerializer.SpaceToDepthDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToDepthDescriptor=} obj + * @returns {armnnSerializer.SpaceToDepthDescriptor} + */ +armnnSerializer.SpaceToDepthDescriptor.getRootAsSpaceToDepthDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.SpaceToDepthDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SpaceToDepthDescriptor=} obj + * @returns {armnnSerializer.SpaceToDepthDescriptor} + */ +armnnSerializer.SpaceToDepthDescriptor.getSizePrefixedRootAsSpaceToDepthDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SpaceToDepthDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.SpaceToDepthDescriptor.prototype.blockSize = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.SpaceToDepthDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SpaceToDepthDescriptor.startSpaceToDepthDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + */ +armnnSerializer.SpaceToDepthDescriptor.addBlockSize = function(builder, blockSize) { + builder.addFieldInt32(0, blockSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.SpaceToDepthDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(1, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToDepthDescriptor.endSpaceToDepthDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SpaceToDepthDescriptor.createSpaceToDepthDescriptor = function(builder, blockSize, dataLayout) { + armnnSerializer.SpaceToDepthDescriptor.startSpaceToDepthDescriptor(builder); + armnnSerializer.SpaceToDepthDescriptor.addBlockSize(builder, blockSize); + armnnSerializer.SpaceToDepthDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.SpaceToDepthDescriptor.endSpaceToDepthDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.SubtractionLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SubtractionLayer} + */ +armnnSerializer.SubtractionLayer.prototype.__init = function(i, bb) { 
+ this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SubtractionLayer=} obj + * @returns {armnnSerializer.SubtractionLayer} + */ +armnnSerializer.SubtractionLayer.getRootAsSubtractionLayer = function(bb, obj) { + return (obj || new armnnSerializer.SubtractionLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SubtractionLayer=} obj + * @returns {armnnSerializer.SubtractionLayer} + */ +armnnSerializer.SubtractionLayer.getSizePrefixedRootAsSubtractionLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SubtractionLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.SubtractionLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SubtractionLayer.startSubtractionLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.SubtractionLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SubtractionLayer.endSubtractionLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SubtractionLayer.createSubtractionLayer = function(builder, baseOffset) { + armnnSerializer.SubtractionLayer.startSubtractionLayer(builder); + armnnSerializer.SubtractionLayer.addBase(builder, baseOffset); + return armnnSerializer.SubtractionLayer.endSubtractionLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.BatchToSpaceNdLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.BatchToSpaceNdLayer} + */ +armnnSerializer.BatchToSpaceNdLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BatchToSpaceNdLayer=} obj + * @returns {armnnSerializer.BatchToSpaceNdLayer} + */ +armnnSerializer.BatchToSpaceNdLayer.getRootAsBatchToSpaceNdLayer = function(bb, obj) { + return (obj || new armnnSerializer.BatchToSpaceNdLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb 
+ * @param {armnnSerializer.BatchToSpaceNdLayer=} obj + * @returns {armnnSerializer.BatchToSpaceNdLayer} + */ +armnnSerializer.BatchToSpaceNdLayer.getSizePrefixedRootAsBatchToSpaceNdLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.BatchToSpaceNdLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.BatchToSpaceNdLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.BatchToSpaceNdDescriptor=} obj + * @returns {armnnSerializer.BatchToSpaceNdDescriptor|null} + */ +armnnSerializer.BatchToSpaceNdLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.BatchToSpaceNdDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.BatchToSpaceNdLayer.startBatchToSpaceNdLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.BatchToSpaceNdLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.BatchToSpaceNdLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchToSpaceNdLayer.endBatchToSpaceNdLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchToSpaceNdLayer.createBatchToSpaceNdLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.BatchToSpaceNdLayer.startBatchToSpaceNdLayer(builder); + armnnSerializer.BatchToSpaceNdLayer.addBase(builder, baseOffset); + armnnSerializer.BatchToSpaceNdLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.BatchToSpaceNdLayer.endBatchToSpaceNdLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.BatchToSpaceNdDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.BatchToSpaceNdDescriptor} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.__init = function(i, bb) { + 
this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BatchToSpaceNdDescriptor=} obj + * @returns {armnnSerializer.BatchToSpaceNdDescriptor} + */ +armnnSerializer.BatchToSpaceNdDescriptor.getRootAsBatchToSpaceNdDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.BatchToSpaceNdDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BatchToSpaceNdDescriptor=} obj + * @returns {armnnSerializer.BatchToSpaceNdDescriptor} + */ +armnnSerializer.BatchToSpaceNdDescriptor.getSizePrefixedRootAsBatchToSpaceNdDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.BatchToSpaceNdDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.blockShape = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.blockShapeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.blockShapeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.crops = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.cropsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.cropsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.BatchToSpaceNdDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.BatchToSpaceNdDescriptor.startBatchToSpaceNdDescriptor = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blockShapeOffset + */ +armnnSerializer.BatchToSpaceNdDescriptor.addBlockShape = function(builder, blockShapeOffset) { + builder.addFieldOffset(0, blockShapeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchToSpaceNdDescriptor.createBlockShapeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.BatchToSpaceNdDescriptor.startBlockShapeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** 
+ * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cropsOffset + */ +armnnSerializer.BatchToSpaceNdDescriptor.addCrops = function(builder, cropsOffset) { + builder.addFieldOffset(1, cropsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchToSpaceNdDescriptor.createCropsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.BatchToSpaceNdDescriptor.startCropsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.BatchToSpaceNdDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(2, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchToSpaceNdDescriptor.endBatchToSpaceNdDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blockShapeOffset + * @param {flatbuffers.Offset} cropsOffset + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchToSpaceNdDescriptor.createBatchToSpaceNdDescriptor = function(builder, blockShapeOffset, cropsOffset, dataLayout) { + armnnSerializer.BatchToSpaceNdDescriptor.startBatchToSpaceNdDescriptor(builder); + armnnSerializer.BatchToSpaceNdDescriptor.addBlockShape(builder, blockShapeOffset); + armnnSerializer.BatchToSpaceNdDescriptor.addCrops(builder, cropsOffset); + armnnSerializer.BatchToSpaceNdDescriptor.addDataLayout(builder, dataLayout); + 
return armnnSerializer.BatchToSpaceNdDescriptor.endBatchToSpaceNdDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.NormalizationLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.NormalizationLayer} + */ +armnnSerializer.NormalizationLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.NormalizationLayer=} obj + * @returns {armnnSerializer.NormalizationLayer} + */ +armnnSerializer.NormalizationLayer.getRootAsNormalizationLayer = function(bb, obj) { + return (obj || new armnnSerializer.NormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.NormalizationLayer=} obj + * @returns {armnnSerializer.NormalizationLayer} + */ +armnnSerializer.NormalizationLayer.getSizePrefixedRootAsNormalizationLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.NormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.NormalizationLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.NormalizationDescriptor=} obj + * @returns {armnnSerializer.NormalizationDescriptor|null} + */ +armnnSerializer.NormalizationLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.NormalizationDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.NormalizationLayer.startNormalizationLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.NormalizationLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.NormalizationLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.NormalizationLayer.endNormalizationLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.NormalizationLayer.createNormalizationLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.NormalizationLayer.startNormalizationLayer(builder); + armnnSerializer.NormalizationLayer.addBase(builder, baseOffset); + armnnSerializer.NormalizationLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.NormalizationLayer.endNormalizationLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.NormalizationDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.NormalizationDescriptor} + */ +armnnSerializer.NormalizationDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + 
this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.NormalizationDescriptor=} obj + * @returns {armnnSerializer.NormalizationDescriptor} + */ +armnnSerializer.NormalizationDescriptor.getRootAsNormalizationDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.NormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.NormalizationDescriptor=} obj + * @returns {armnnSerializer.NormalizationDescriptor} + */ +armnnSerializer.NormalizationDescriptor.getSizePrefixedRootAsNormalizationDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.NormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.NormalizationAlgorithmChannel} + */ +armnnSerializer.NormalizationDescriptor.prototype.normChannelType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {armnnSerializer.NormalizationAlgorithmChannel} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.NormalizationAlgorithmChannel.Across; +}; + +/** + * @returns {armnnSerializer.NormalizationAlgorithmMethod} + */ +armnnSerializer.NormalizationDescriptor.prototype.normMethodType = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {armnnSerializer.NormalizationAlgorithmMethod} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.NormalizationAlgorithmMethod.LocalBrightness; +}; + +/** + * @returns {number} + */ +armnnSerializer.NormalizationDescriptor.prototype.normSize = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.NormalizationDescriptor.prototype.alpha = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.NormalizationDescriptor.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.NormalizationDescriptor.prototype.k = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.NormalizationDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? /** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NCHW; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.NormalizationDescriptor.startNormalizationDescriptor = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.NormalizationAlgorithmChannel} normChannelType + */ +armnnSerializer.NormalizationDescriptor.addNormChannelType = function(builder, normChannelType) { + builder.addFieldInt8(0, normChannelType, armnnSerializer.NormalizationAlgorithmChannel.Across); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.NormalizationAlgorithmMethod} normMethodType + */ +armnnSerializer.NormalizationDescriptor.addNormMethodType = function(builder, normMethodType) { + builder.addFieldInt8(1, normMethodType, armnnSerializer.NormalizationAlgorithmMethod.LocalBrightness); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} normSize + */ 
+armnnSerializer.NormalizationDescriptor.addNormSize = function(builder, normSize) { + builder.addFieldInt32(2, normSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + */ +armnnSerializer.NormalizationDescriptor.addAlpha = function(builder, alpha) { + builder.addFieldFloat32(3, alpha, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +armnnSerializer.NormalizationDescriptor.addBeta = function(builder, beta) { + builder.addFieldFloat32(4, beta, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} k + */ +armnnSerializer.NormalizationDescriptor.addK = function(builder, k) { + builder.addFieldFloat32(5, k, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.NormalizationDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(6, dataLayout, armnnSerializer.DataLayout.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.NormalizationDescriptor.endNormalizationDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.NormalizationAlgorithmChannel} normChannelType + * @param {armnnSerializer.NormalizationAlgorithmMethod} normMethodType + * @param {number} normSize + * @param {number} alpha + * @param {number} beta + * @param {number} k + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.NormalizationDescriptor.createNormalizationDescriptor = function(builder, normChannelType, normMethodType, normSize, alpha, beta, k, dataLayout) { + armnnSerializer.NormalizationDescriptor.startNormalizationDescriptor(builder); + armnnSerializer.NormalizationDescriptor.addNormChannelType(builder, normChannelType); + 
armnnSerializer.NormalizationDescriptor.addNormMethodType(builder, normMethodType); + armnnSerializer.NormalizationDescriptor.addNormSize(builder, normSize); + armnnSerializer.NormalizationDescriptor.addAlpha(builder, alpha); + armnnSerializer.NormalizationDescriptor.addBeta(builder, beta); + armnnSerializer.NormalizationDescriptor.addK(builder, k); + armnnSerializer.NormalizationDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.NormalizationDescriptor.endNormalizationDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.MeanLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.MeanLayer} + */ +armnnSerializer.MeanLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MeanLayer=} obj + * @returns {armnnSerializer.MeanLayer} + */ +armnnSerializer.MeanLayer.getRootAsMeanLayer = function(bb, obj) { + return (obj || new armnnSerializer.MeanLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MeanLayer=} obj + * @returns {armnnSerializer.MeanLayer} + */ +armnnSerializer.MeanLayer.getSizePrefixedRootAsMeanLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.MeanLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.MeanLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.MeanDescriptor=} obj + * @returns {armnnSerializer.MeanDescriptor|null} + */ +armnnSerializer.MeanLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.MeanDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.MeanLayer.startMeanLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.MeanLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.MeanLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MeanLayer.endMeanLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MeanLayer.createMeanLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.MeanLayer.startMeanLayer(builder); + armnnSerializer.MeanLayer.addBase(builder, baseOffset); + armnnSerializer.MeanLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.MeanLayer.endMeanLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.MeanDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param 
{number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.MeanDescriptor} + */ +armnnSerializer.MeanDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MeanDescriptor=} obj + * @returns {armnnSerializer.MeanDescriptor} + */ +armnnSerializer.MeanDescriptor.getRootAsMeanDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.MeanDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MeanDescriptor=} obj + * @returns {armnnSerializer.MeanDescriptor} + */ +armnnSerializer.MeanDescriptor.getSizePrefixedRootAsMeanDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.MeanDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.MeanDescriptor.prototype.axis = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.MeanDescriptor.prototype.axisLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.MeanDescriptor.prototype.axisArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.MeanDescriptor.prototype.keepDims = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.MeanDescriptor.startMeanDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} axisOffset + */ +armnnSerializer.MeanDescriptor.addAxis = function(builder, axisOffset) { + builder.addFieldOffset(0, axisOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MeanDescriptor.createAxisVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.MeanDescriptor.startAxisVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepDims + */ +armnnSerializer.MeanDescriptor.addKeepDims = function(builder, keepDims) { + builder.addFieldInt8(1, +keepDims, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MeanDescriptor.endMeanDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} axisOffset + * @param {boolean} keepDims + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MeanDescriptor.createMeanDescriptor = function(builder, axisOffset, keepDims) { + armnnSerializer.MeanDescriptor.startMeanDescriptor(builder); + armnnSerializer.MeanDescriptor.addAxis(builder, axisOffset); + armnnSerializer.MeanDescriptor.addKeepDims(builder, keepDims); + return armnnSerializer.MeanDescriptor.endMeanDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.PadLayer = function() { + /** 
+ * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.PadLayer} + */ +armnnSerializer.PadLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PadLayer=} obj + * @returns {armnnSerializer.PadLayer} + */ +armnnSerializer.PadLayer.getRootAsPadLayer = function(bb, obj) { + return (obj || new armnnSerializer.PadLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PadLayer=} obj + * @returns {armnnSerializer.PadLayer} + */ +armnnSerializer.PadLayer.getSizePrefixedRootAsPadLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.PadLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.PadLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.PadDescriptor=} obj + * @returns {armnnSerializer.PadDescriptor|null} + */ +armnnSerializer.PadLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.PadDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.PadLayer.startPadLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.PadLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.PadLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PadLayer.endPadLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PadLayer.createPadLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.PadLayer.startPadLayer(builder); + armnnSerializer.PadLayer.addBase(builder, baseOffset); + armnnSerializer.PadLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.PadLayer.endPadLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.PadDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.PadDescriptor} + */ +armnnSerializer.PadDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PadDescriptor=} obj + * @returns {armnnSerializer.PadDescriptor} + */ 
+armnnSerializer.PadDescriptor.getRootAsPadDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.PadDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PadDescriptor=} obj + * @returns {armnnSerializer.PadDescriptor} + */ +armnnSerializer.PadDescriptor.getSizePrefixedRootAsPadDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.PadDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.PadDescriptor.prototype.padList = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.PadDescriptor.prototype.padListLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.PadDescriptor.prototype.padListArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.PadDescriptor.prototype.padValue = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.PadDescriptor.startPadDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} padListOffset + */ +armnnSerializer.PadDescriptor.addPadList = function(builder, padListOffset) { + builder.addFieldOffset(0, padListOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PadDescriptor.createPadListVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.PadDescriptor.startPadListVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padValue + */ +armnnSerializer.PadDescriptor.addPadValue = function(builder, padValue) { + builder.addFieldFloat32(1, padValue, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PadDescriptor.endPadDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} padListOffset + * @param {number} padValue + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PadDescriptor.createPadDescriptor = function(builder, padListOffset, padValue) { + armnnSerializer.PadDescriptor.startPadDescriptor(builder); + armnnSerializer.PadDescriptor.addPadList(builder, padListOffset); + armnnSerializer.PadDescriptor.addPadValue(builder, padValue); + return armnnSerializer.PadDescriptor.endPadDescriptor(builder); +} + +/** + * @deprecated Use ElementwiseUnaryLayer instead + * + * 
@constructor + */ +armnnSerializer.RsqrtLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.RsqrtLayer} + */ +armnnSerializer.RsqrtLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.RsqrtLayer=} obj + * @returns {armnnSerializer.RsqrtLayer} + */ +armnnSerializer.RsqrtLayer.getRootAsRsqrtLayer = function(bb, obj) { + return (obj || new armnnSerializer.RsqrtLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.RsqrtLayer=} obj + * @returns {armnnSerializer.RsqrtLayer} + */ +armnnSerializer.RsqrtLayer.getSizePrefixedRootAsRsqrtLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.RsqrtLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.RsqrtLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.RsqrtLayer.startRsqrtLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.RsqrtLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.RsqrtLayer.endRsqrtLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.RsqrtLayer.createRsqrtLayer = function(builder, baseOffset) { + armnnSerializer.RsqrtLayer.startRsqrtLayer(builder); + armnnSerializer.RsqrtLayer.addBase(builder, baseOffset); + return armnnSerializer.RsqrtLayer.endRsqrtLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.BatchNormalizationLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.BatchNormalizationLayer} + */ +armnnSerializer.BatchNormalizationLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BatchNormalizationLayer=} obj + * @returns {armnnSerializer.BatchNormalizationLayer} + */ +armnnSerializer.BatchNormalizationLayer.getRootAsBatchNormalizationLayer = function(bb, obj) { + return (obj || new armnnSerializer.BatchNormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{armnnSerializer.BatchNormalizationLayer=} obj + * @returns {armnnSerializer.BatchNormalizationLayer} + */ +armnnSerializer.BatchNormalizationLayer.getSizePrefixedRootAsBatchNormalizationLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.BatchNormalizationLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.BatchNormalizationLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.BatchNormalizationDescriptor=} obj + * @returns {armnnSerializer.BatchNormalizationDescriptor|null} + */ +armnnSerializer.BatchNormalizationLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.BatchNormalizationDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.BatchNormalizationLayer.prototype.mean = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.BatchNormalizationLayer.prototype.variance = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.BatchNormalizationLayer.prototype.beta = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.BatchNormalizationLayer.prototype.gamma = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.BatchNormalizationLayer.startBatchNormalizationLayer = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.BatchNormalizationLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.BatchNormalizationLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} meanOffset + */ +armnnSerializer.BatchNormalizationLayer.addMean = function(builder, meanOffset) { + builder.addFieldOffset(2, meanOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} varianceOffset + */ +armnnSerializer.BatchNormalizationLayer.addVariance = function(builder, varianceOffset) { + builder.addFieldOffset(3, varianceOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} 
builder + * @param {flatbuffers.Offset} betaOffset + */ +armnnSerializer.BatchNormalizationLayer.addBeta = function(builder, betaOffset) { + builder.addFieldOffset(4, betaOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} gammaOffset + */ +armnnSerializer.BatchNormalizationLayer.addGamma = function(builder, gammaOffset) { + builder.addFieldOffset(5, gammaOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchNormalizationLayer.endBatchNormalizationLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} meanOffset + * @param {flatbuffers.Offset} varianceOffset + * @param {flatbuffers.Offset} betaOffset + * @param {flatbuffers.Offset} gammaOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchNormalizationLayer.createBatchNormalizationLayer = function(builder, baseOffset, descriptorOffset, meanOffset, varianceOffset, betaOffset, gammaOffset) { + armnnSerializer.BatchNormalizationLayer.startBatchNormalizationLayer(builder); + armnnSerializer.BatchNormalizationLayer.addBase(builder, baseOffset); + armnnSerializer.BatchNormalizationLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.BatchNormalizationLayer.addMean(builder, meanOffset); + armnnSerializer.BatchNormalizationLayer.addVariance(builder, varianceOffset); + armnnSerializer.BatchNormalizationLayer.addBeta(builder, betaOffset); + armnnSerializer.BatchNormalizationLayer.addGamma(builder, gammaOffset); + return armnnSerializer.BatchNormalizationLayer.endBatchNormalizationLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.BatchNormalizationDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + 
this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.BatchNormalizationDescriptor} + */ +armnnSerializer.BatchNormalizationDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BatchNormalizationDescriptor=} obj + * @returns {armnnSerializer.BatchNormalizationDescriptor} + */ +armnnSerializer.BatchNormalizationDescriptor.getRootAsBatchNormalizationDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.BatchNormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.BatchNormalizationDescriptor=} obj + * @returns {armnnSerializer.BatchNormalizationDescriptor} + */ +armnnSerializer.BatchNormalizationDescriptor.getSizePrefixedRootAsBatchNormalizationDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.BatchNormalizationDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.BatchNormalizationDescriptor.prototype.eps = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.BatchNormalizationDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.BatchNormalizationDescriptor.startBatchNormalizationDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} eps + */ +armnnSerializer.BatchNormalizationDescriptor.addEps = function(builder, eps) { + builder.addFieldFloat32(0, eps, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.BatchNormalizationDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(1, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchNormalizationDescriptor.endBatchNormalizationDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} eps + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.BatchNormalizationDescriptor.createBatchNormalizationDescriptor = function(builder, eps, dataLayout) { + armnnSerializer.BatchNormalizationDescriptor.startBatchNormalizationDescriptor(builder); + armnnSerializer.BatchNormalizationDescriptor.addEps(builder, eps); + armnnSerializer.BatchNormalizationDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.BatchNormalizationDescriptor.endBatchNormalizationDescriptor(builder); +} + +/** + * @deprecated Use ResizeLayer instead + * + * @constructor + */ +armnnSerializer.ResizeBilinearLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns 
{armnnSerializer.ResizeBilinearLayer} + */ +armnnSerializer.ResizeBilinearLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeBilinearLayer=} obj + * @returns {armnnSerializer.ResizeBilinearLayer} + */ +armnnSerializer.ResizeBilinearLayer.getRootAsResizeBilinearLayer = function(bb, obj) { + return (obj || new armnnSerializer.ResizeBilinearLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeBilinearLayer=} obj + * @returns {armnnSerializer.ResizeBilinearLayer} + */ +armnnSerializer.ResizeBilinearLayer.getSizePrefixedRootAsResizeBilinearLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ResizeBilinearLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ResizeBilinearLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ResizeBilinearDescriptor=} obj + * @returns {armnnSerializer.ResizeBilinearDescriptor|null} + */ +armnnSerializer.ResizeBilinearLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ResizeBilinearDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ResizeBilinearLayer.startResizeBilinearLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ResizeBilinearLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ResizeBilinearLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeBilinearLayer.endResizeBilinearLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeBilinearLayer.createResizeBilinearLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ResizeBilinearLayer.startResizeBilinearLayer(builder); + armnnSerializer.ResizeBilinearLayer.addBase(builder, baseOffset); + armnnSerializer.ResizeBilinearLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ResizeBilinearLayer.endResizeBilinearLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ResizeBilinearDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ResizeBilinearDescriptor} + */ +armnnSerializer.ResizeBilinearDescriptor.prototype.__init = function(i, bb) { + 
this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeBilinearDescriptor=} obj + * @returns {armnnSerializer.ResizeBilinearDescriptor} + */ +armnnSerializer.ResizeBilinearDescriptor.getRootAsResizeBilinearDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ResizeBilinearDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeBilinearDescriptor=} obj + * @returns {armnnSerializer.ResizeBilinearDescriptor} + */ +armnnSerializer.ResizeBilinearDescriptor.getSizePrefixedRootAsResizeBilinearDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ResizeBilinearDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.ResizeBilinearDescriptor.prototype.targetWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.ResizeBilinearDescriptor.prototype.targetHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.ResizeBilinearDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ResizeBilinearDescriptor.startResizeBilinearDescriptor = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} targetWidth + */ +armnnSerializer.ResizeBilinearDescriptor.addTargetWidth = function(builder, targetWidth) { + builder.addFieldInt32(0, targetWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} targetHeight + */ +armnnSerializer.ResizeBilinearDescriptor.addTargetHeight = function(builder, targetHeight) { + builder.addFieldInt32(1, targetHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.ResizeBilinearDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(2, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeBilinearDescriptor.endResizeBilinearDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} targetWidth + * @param {number} targetHeight + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeBilinearDescriptor.createResizeBilinearDescriptor = function(builder, targetWidth, targetHeight, dataLayout) { + armnnSerializer.ResizeBilinearDescriptor.startResizeBilinearDescriptor(builder); + armnnSerializer.ResizeBilinearDescriptor.addTargetWidth(builder, targetWidth); + armnnSerializer.ResizeBilinearDescriptor.addTargetHeight(builder, targetHeight); + armnnSerializer.ResizeBilinearDescriptor.addDataLayout(builder, dataLayout); + return 
armnnSerializer.ResizeBilinearDescriptor.endResizeBilinearDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.SliceLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SliceLayer} + */ +armnnSerializer.SliceLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SliceLayer=} obj + * @returns {armnnSerializer.SliceLayer} + */ +armnnSerializer.SliceLayer.getRootAsSliceLayer = function(bb, obj) { + return (obj || new armnnSerializer.SliceLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SliceLayer=} obj + * @returns {armnnSerializer.SliceLayer} + */ +armnnSerializer.SliceLayer.getSizePrefixedRootAsSliceLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SliceLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.SliceLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.SliceDescriptor=} obj + * @returns {armnnSerializer.SliceDescriptor|null} + */ +armnnSerializer.SliceLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.SliceDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SliceLayer.startSliceLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.SliceLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.SliceLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SliceLayer.endSliceLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SliceLayer.createSliceLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.SliceLayer.startSliceLayer(builder); + armnnSerializer.SliceLayer.addBase(builder, baseOffset); + armnnSerializer.SliceLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.SliceLayer.endSliceLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.SliceDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SliceDescriptor} + */ +armnnSerializer.SliceDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SliceDescriptor=} obj + * @returns 
{armnnSerializer.SliceDescriptor} + */ +armnnSerializer.SliceDescriptor.getRootAsSliceDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.SliceDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SliceDescriptor=} obj + * @returns {armnnSerializer.SliceDescriptor} + */ +armnnSerializer.SliceDescriptor.getSizePrefixedRootAsSliceDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SliceDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.SliceDescriptor.prototype.begin = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.SliceDescriptor.prototype.beginLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.SliceDescriptor.prototype.beginArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.SliceDescriptor.prototype.size = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.SliceDescriptor.prototype.sizeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.SliceDescriptor.prototype.sizeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SliceDescriptor.startSliceDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} beginOffset + */ +armnnSerializer.SliceDescriptor.addBegin = function(builder, beginOffset) { + builder.addFieldOffset(0, beginOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SliceDescriptor.createBeginVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.SliceDescriptor.startBeginVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} sizeOffset + */ +armnnSerializer.SliceDescriptor.addSize = function(builder, sizeOffset) { + builder.addFieldOffset(1, sizeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SliceDescriptor.createSizeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ 
+armnnSerializer.SliceDescriptor.startSizeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SliceDescriptor.endSliceDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} beginOffset + * @param {flatbuffers.Offset} sizeOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SliceDescriptor.createSliceDescriptor = function(builder, beginOffset, sizeOffset) { + armnnSerializer.SliceDescriptor.startSliceDescriptor(builder); + armnnSerializer.SliceDescriptor.addBegin(builder, beginOffset); + armnnSerializer.SliceDescriptor.addSize(builder, sizeOffset); + return armnnSerializer.SliceDescriptor.endSliceDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.StridedSliceLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.StridedSliceLayer} + */ +armnnSerializer.StridedSliceLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StridedSliceLayer=} obj + * @returns {armnnSerializer.StridedSliceLayer} + */ +armnnSerializer.StridedSliceLayer.getRootAsStridedSliceLayer = function(bb, obj) { + return (obj || new armnnSerializer.StridedSliceLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StridedSliceLayer=} obj + * @returns {armnnSerializer.StridedSliceLayer} + */ +armnnSerializer.StridedSliceLayer.getSizePrefixedRootAsStridedSliceLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); 
+ return (obj || new armnnSerializer.StridedSliceLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.StridedSliceLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.StridedSliceDescriptor=} obj + * @returns {armnnSerializer.StridedSliceDescriptor|null} + */ +armnnSerializer.StridedSliceLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.StridedSliceDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.StridedSliceLayer.startStridedSliceLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.StridedSliceLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.StridedSliceLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StridedSliceLayer.endStridedSliceLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StridedSliceLayer.createStridedSliceLayer = function(builder, baseOffset, descriptorOffset) { + 
armnnSerializer.StridedSliceLayer.startStridedSliceLayer(builder); + armnnSerializer.StridedSliceLayer.addBase(builder, baseOffset); + armnnSerializer.StridedSliceLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.StridedSliceLayer.endStridedSliceLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.StridedSliceDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.StridedSliceDescriptor} + */ +armnnSerializer.StridedSliceDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StridedSliceDescriptor=} obj + * @returns {armnnSerializer.StridedSliceDescriptor} + */ +armnnSerializer.StridedSliceDescriptor.getRootAsStridedSliceDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.StridedSliceDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StridedSliceDescriptor=} obj + * @returns {armnnSerializer.StridedSliceDescriptor} + */ +armnnSerializer.StridedSliceDescriptor.getSizePrefixedRootAsStridedSliceDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.StridedSliceDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.begin = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.beginLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +armnnSerializer.StridedSliceDescriptor.prototype.beginArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.end = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.endLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +armnnSerializer.StridedSliceDescriptor.prototype.endArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.stride = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.strideLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +armnnSerializer.StridedSliceDescriptor.prototype.strideArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.beginMask = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.endMask = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.shrinkAxisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.ellipsisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StridedSliceDescriptor.prototype.newAxisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.StridedSliceDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.StridedSliceDescriptor.startStridedSliceDescriptor = function(builder) { + builder.startObject(9); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} beginOffset + */ +armnnSerializer.StridedSliceDescriptor.addBegin = function(builder, beginOffset) { + builder.addFieldOffset(0, beginOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StridedSliceDescriptor.createBeginVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.StridedSliceDescriptor.startBeginVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} endOffset + */ +armnnSerializer.StridedSliceDescriptor.addEnd = function(builder, endOffset) { + builder.addFieldOffset(1, endOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StridedSliceDescriptor.createEndVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.StridedSliceDescriptor.startEndVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} strideOffset + */ 
+armnnSerializer.StridedSliceDescriptor.addStride = function(builder, strideOffset) { + builder.addFieldOffset(2, strideOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StridedSliceDescriptor.createStrideVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.StridedSliceDescriptor.startStrideVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beginMask + */ +armnnSerializer.StridedSliceDescriptor.addBeginMask = function(builder, beginMask) { + builder.addFieldInt32(3, beginMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} endMask + */ +armnnSerializer.StridedSliceDescriptor.addEndMask = function(builder, endMask) { + builder.addFieldInt32(4, endMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} shrinkAxisMask + */ +armnnSerializer.StridedSliceDescriptor.addShrinkAxisMask = function(builder, shrinkAxisMask) { + builder.addFieldInt32(5, shrinkAxisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} ellipsisMask + */ +armnnSerializer.StridedSliceDescriptor.addEllipsisMask = function(builder, ellipsisMask) { + builder.addFieldInt32(6, ellipsisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} newAxisMask + */ +armnnSerializer.StridedSliceDescriptor.addNewAxisMask = function(builder, newAxisMask) { + builder.addFieldInt32(7, newAxisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.StridedSliceDescriptor.addDataLayout = function(builder, 
dataLayout) { + builder.addFieldInt8(8, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StridedSliceDescriptor.endStridedSliceDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} beginOffset + * @param {flatbuffers.Offset} endOffset + * @param {flatbuffers.Offset} strideOffset + * @param {number} beginMask + * @param {number} endMask + * @param {number} shrinkAxisMask + * @param {number} ellipsisMask + * @param {number} newAxisMask + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StridedSliceDescriptor.createStridedSliceDescriptor = function(builder, beginOffset, endOffset, strideOffset, beginMask, endMask, shrinkAxisMask, ellipsisMask, newAxisMask, dataLayout) { + armnnSerializer.StridedSliceDescriptor.startStridedSliceDescriptor(builder); + armnnSerializer.StridedSliceDescriptor.addBegin(builder, beginOffset); + armnnSerializer.StridedSliceDescriptor.addEnd(builder, endOffset); + armnnSerializer.StridedSliceDescriptor.addStride(builder, strideOffset); + armnnSerializer.StridedSliceDescriptor.addBeginMask(builder, beginMask); + armnnSerializer.StridedSliceDescriptor.addEndMask(builder, endMask); + armnnSerializer.StridedSliceDescriptor.addShrinkAxisMask(builder, shrinkAxisMask); + armnnSerializer.StridedSliceDescriptor.addEllipsisMask(builder, ellipsisMask); + armnnSerializer.StridedSliceDescriptor.addNewAxisMask(builder, newAxisMask); + armnnSerializer.StridedSliceDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.StridedSliceDescriptor.endStridedSliceDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.ConcatLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + 
+/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ConcatLayer} + */ +armnnSerializer.ConcatLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ConcatLayer=} obj + * @returns {armnnSerializer.ConcatLayer} + */ +armnnSerializer.ConcatLayer.getRootAsConcatLayer = function(bb, obj) { + return (obj || new armnnSerializer.ConcatLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ConcatLayer=} obj + * @returns {armnnSerializer.ConcatLayer} + */ +armnnSerializer.ConcatLayer.getSizePrefixedRootAsConcatLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ConcatLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ConcatLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.OriginsDescriptor=} obj + * @returns {armnnSerializer.OriginsDescriptor|null} + */ +armnnSerializer.ConcatLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.OriginsDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ConcatLayer.startConcatLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ConcatLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ConcatLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ConcatLayer.endConcatLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ConcatLayer.createConcatLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ConcatLayer.startConcatLayer(builder); + armnnSerializer.ConcatLayer.addBase(builder, baseOffset); + armnnSerializer.ConcatLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ConcatLayer.endConcatLayer(builder); +} + +/** + * @deprecated Use ConcatLayer instead + * + * @constructor + */ +armnnSerializer.MergerLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.MergerLayer} + */ +armnnSerializer.MergerLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{armnnSerializer.MergerLayer=} obj + * @returns {armnnSerializer.MergerLayer} + */ +armnnSerializer.MergerLayer.getRootAsMergerLayer = function(bb, obj) { + return (obj || new armnnSerializer.MergerLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MergerLayer=} obj + * @returns {armnnSerializer.MergerLayer} + */ +armnnSerializer.MergerLayer.getSizePrefixedRootAsMergerLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.MergerLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.MergerLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.OriginsDescriptor=} obj + * @returns {armnnSerializer.OriginsDescriptor|null} + */ +armnnSerializer.MergerLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.OriginsDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.MergerLayer.startMergerLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.MergerLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.MergerLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MergerLayer.endMergerLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MergerLayer.createMergerLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.MergerLayer.startMergerLayer(builder); + armnnSerializer.MergerLayer.addBase(builder, baseOffset); + armnnSerializer.MergerLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.MergerLayer.endMergerLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.UintVector = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.UintVector} + */ +armnnSerializer.UintVector.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.UintVector=} obj + * @returns 
{armnnSerializer.UintVector} + */ +armnnSerializer.UintVector.getRootAsUintVector = function(bb, obj) { + return (obj || new armnnSerializer.UintVector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.UintVector=} obj + * @returns {armnnSerializer.UintVector} + */ +armnnSerializer.UintVector.getSizePrefixedRootAsUintVector = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.UintVector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.UintVector.prototype.data = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.UintVector.prototype.dataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.UintVector.prototype.dataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.UintVector.startUintVector = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + */ +armnnSerializer.UintVector.addData = function(builder, dataOffset) { + builder.addFieldOffset(0, dataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.UintVector.createDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.UintVector.startDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.UintVector.endUintVector = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.UintVector.createUintVector = function(builder, dataOffset) { + armnnSerializer.UintVector.startUintVector(builder); + armnnSerializer.UintVector.addData(builder, dataOffset); + return armnnSerializer.UintVector.endUintVector(builder); +} + +/** + * @constructor + */ +armnnSerializer.OriginsDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.OriginsDescriptor} + */ 
+armnnSerializer.OriginsDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.OriginsDescriptor=} obj + * @returns {armnnSerializer.OriginsDescriptor} + */ +armnnSerializer.OriginsDescriptor.getRootAsOriginsDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.OriginsDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.OriginsDescriptor=} obj + * @returns {armnnSerializer.OriginsDescriptor} + */ +armnnSerializer.OriginsDescriptor.getSizePrefixedRootAsOriginsDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.OriginsDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.OriginsDescriptor.prototype.concatAxis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.OriginsDescriptor.prototype.numViews = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.OriginsDescriptor.prototype.numDimensions = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {armnnSerializer.UintVector=} obj + * @returns {armnnSerializer.UintVector} + */ +armnnSerializer.OriginsDescriptor.prototype.viewOrigins = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new armnnSerializer.UintVector).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.OriginsDescriptor.prototype.viewOriginsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.OriginsDescriptor.startOriginsDescriptor = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} concatAxis + */ +armnnSerializer.OriginsDescriptor.addConcatAxis = function(builder, concatAxis) { + builder.addFieldInt32(0, concatAxis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numViews + */ +armnnSerializer.OriginsDescriptor.addNumViews = function(builder, numViews) { + builder.addFieldInt32(1, numViews, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numDimensions + */ +armnnSerializer.OriginsDescriptor.addNumDimensions = function(builder, numDimensions) { + builder.addFieldInt32(2, numDimensions, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} viewOriginsOffset + */ +armnnSerializer.OriginsDescriptor.addViewOrigins = function(builder, viewOriginsOffset) { + builder.addFieldOffset(3, viewOriginsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.OriginsDescriptor.createViewOriginsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.OriginsDescriptor.startViewOriginsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); 
+}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.OriginsDescriptor.endOriginsDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} concatAxis + * @param {number} numViews + * @param {number} numDimensions + * @param {flatbuffers.Offset} viewOriginsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.OriginsDescriptor.createOriginsDescriptor = function(builder, concatAxis, numViews, numDimensions, viewOriginsOffset) { + armnnSerializer.OriginsDescriptor.startOriginsDescriptor(builder); + armnnSerializer.OriginsDescriptor.addConcatAxis(builder, concatAxis); + armnnSerializer.OriginsDescriptor.addNumViews(builder, numViews); + armnnSerializer.OriginsDescriptor.addNumDimensions(builder, numDimensions); + armnnSerializer.OriginsDescriptor.addViewOrigins(builder, viewOriginsOffset); + return armnnSerializer.OriginsDescriptor.endOriginsDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.ViewsDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ViewsDescriptor} + */ +armnnSerializer.ViewsDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ViewsDescriptor=} obj + * @returns {armnnSerializer.ViewsDescriptor} + */ +armnnSerializer.ViewsDescriptor.getRootAsViewsDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ViewsDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ViewsDescriptor=} obj + * @returns {armnnSerializer.ViewsDescriptor} + */ 
+armnnSerializer.ViewsDescriptor.getSizePrefixedRootAsViewsDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ViewsDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.OriginsDescriptor=} obj + * @returns {armnnSerializer.OriginsDescriptor|null} + */ +armnnSerializer.ViewsDescriptor.prototype.origins = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.OriginsDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @param {armnnSerializer.UintVector=} obj + * @returns {armnnSerializer.UintVector} + */ +armnnSerializer.ViewsDescriptor.prototype.viewSizes = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.UintVector).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.ViewsDescriptor.prototype.viewSizesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ViewsDescriptor.startViewsDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} originsOffset + */ +armnnSerializer.ViewsDescriptor.addOrigins = function(builder, originsOffset) { + builder.addFieldOffset(0, originsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} viewSizesOffset + */ +armnnSerializer.ViewsDescriptor.addViewSizes = function(builder, viewSizesOffset) { + builder.addFieldOffset(1, viewSizesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ViewsDescriptor.createViewSizesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.ViewsDescriptor.startViewSizesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ViewsDescriptor.endViewsDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} originsOffset + * @param {flatbuffers.Offset} viewSizesOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ViewsDescriptor.createViewsDescriptor = function(builder, originsOffset, viewSizesOffset) { + armnnSerializer.ViewsDescriptor.startViewsDescriptor(builder); + armnnSerializer.ViewsDescriptor.addOrigins(builder, originsOffset); + armnnSerializer.ViewsDescriptor.addViewSizes(builder, viewSizesOffset); + return 
armnnSerializer.ViewsDescriptor.endViewsDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.SplitterLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SplitterLayer} + */ +armnnSerializer.SplitterLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SplitterLayer=} obj + * @returns {armnnSerializer.SplitterLayer} + */ +armnnSerializer.SplitterLayer.getRootAsSplitterLayer = function(bb, obj) { + return (obj || new armnnSerializer.SplitterLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SplitterLayer=} obj + * @returns {armnnSerializer.SplitterLayer} + */ +armnnSerializer.SplitterLayer.getSizePrefixedRootAsSplitterLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SplitterLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.SplitterLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ViewsDescriptor=} obj + * @returns {armnnSerializer.ViewsDescriptor|null} + */ +armnnSerializer.SplitterLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ViewsDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SplitterLayer.startSplitterLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.SplitterLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.SplitterLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SplitterLayer.endSplitterLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SplitterLayer.createSplitterLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.SplitterLayer.startSplitterLayer(builder); + armnnSerializer.SplitterLayer.addBase(builder, baseOffset); + armnnSerializer.SplitterLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.SplitterLayer.endSplitterLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.DetectionPostProcessLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DetectionPostProcessLayer} + */ +armnnSerializer.DetectionPostProcessLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DetectionPostProcessLayer=} obj + * @returns {armnnSerializer.DetectionPostProcessLayer} + */ +armnnSerializer.DetectionPostProcessLayer.getRootAsDetectionPostProcessLayer = function(bb, obj) { + return (obj || new armnnSerializer.DetectionPostProcessLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DetectionPostProcessLayer=} obj + * @returns {armnnSerializer.DetectionPostProcessLayer} + */ +armnnSerializer.DetectionPostProcessLayer.getSizePrefixedRootAsDetectionPostProcessLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DetectionPostProcessLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.DetectionPostProcessLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.DetectionPostProcessDescriptor=} obj + * @returns {armnnSerializer.DetectionPostProcessDescriptor|null} + */ +armnnSerializer.DetectionPostProcessLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.DetectionPostProcessDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.DetectionPostProcessLayer.prototype.anchors = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DetectionPostProcessLayer.startDetectionPostProcessLayer = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.DetectionPostProcessLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.DetectionPostProcessLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} anchorsOffset + */ +armnnSerializer.DetectionPostProcessLayer.addAnchors = function(builder, anchorsOffset) { + builder.addFieldOffset(2, anchorsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DetectionPostProcessLayer.endDetectionPostProcessLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} anchorsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DetectionPostProcessLayer.createDetectionPostProcessLayer = function(builder, baseOffset, descriptorOffset, anchorsOffset) { + armnnSerializer.DetectionPostProcessLayer.startDetectionPostProcessLayer(builder); + armnnSerializer.DetectionPostProcessLayer.addBase(builder, baseOffset); + armnnSerializer.DetectionPostProcessLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.DetectionPostProcessLayer.addAnchors(builder, anchorsOffset); + return 
armnnSerializer.DetectionPostProcessLayer.endDetectionPostProcessLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.DetectionPostProcessDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DetectionPostProcessDescriptor} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DetectionPostProcessDescriptor=} obj + * @returns {armnnSerializer.DetectionPostProcessDescriptor} + */ +armnnSerializer.DetectionPostProcessDescriptor.getRootAsDetectionPostProcessDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.DetectionPostProcessDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DetectionPostProcessDescriptor=} obj + * @returns {armnnSerializer.DetectionPostProcessDescriptor} + */ +armnnSerializer.DetectionPostProcessDescriptor.getSizePrefixedRootAsDetectionPostProcessDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DetectionPostProcessDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.maxDetections = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.maxClassesPerDetection = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.detectionsPerClass = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.nmsScoreThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.nmsIouThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.numClasses = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.useRegularNms = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.scaleX = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.scaleY = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.scaleW = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.DetectionPostProcessDescriptor.prototype.scaleH = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DetectionPostProcessDescriptor.startDetectionPostProcessDescriptor = function(builder) { + builder.startObject(11); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} maxDetections + */ +armnnSerializer.DetectionPostProcessDescriptor.addMaxDetections = function(builder, maxDetections) { + builder.addFieldInt32(0, maxDetections, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} maxClassesPerDetection + */ +armnnSerializer.DetectionPostProcessDescriptor.addMaxClassesPerDetection = function(builder, maxClassesPerDetection) { + builder.addFieldInt32(1, maxClassesPerDetection, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} detectionsPerClass + */ +armnnSerializer.DetectionPostProcessDescriptor.addDetectionsPerClass = function(builder, detectionsPerClass) { + builder.addFieldInt32(2, detectionsPerClass, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} nmsScoreThreshold + */ +armnnSerializer.DetectionPostProcessDescriptor.addNmsScoreThreshold = function(builder, nmsScoreThreshold) { + builder.addFieldFloat32(3, nmsScoreThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} nmsIouThreshold + */ +armnnSerializer.DetectionPostProcessDescriptor.addNmsIouThreshold = function(builder, nmsIouThreshold) { + builder.addFieldFloat32(4, nmsIouThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numClasses + */ +armnnSerializer.DetectionPostProcessDescriptor.addNumClasses = function(builder, numClasses) { + builder.addFieldInt32(5, numClasses, 0); +}; + +/** + * 
@param {flatbuffers.Builder} builder + * @param {boolean} useRegularNms + */ +armnnSerializer.DetectionPostProcessDescriptor.addUseRegularNms = function(builder, useRegularNms) { + builder.addFieldInt8(6, +useRegularNms, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scaleX + */ +armnnSerializer.DetectionPostProcessDescriptor.addScaleX = function(builder, scaleX) { + builder.addFieldFloat32(7, scaleX, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scaleY + */ +armnnSerializer.DetectionPostProcessDescriptor.addScaleY = function(builder, scaleY) { + builder.addFieldFloat32(8, scaleY, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scaleW + */ +armnnSerializer.DetectionPostProcessDescriptor.addScaleW = function(builder, scaleW) { + builder.addFieldFloat32(9, scaleW, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scaleH + */ +armnnSerializer.DetectionPostProcessDescriptor.addScaleH = function(builder, scaleH) { + builder.addFieldFloat32(10, scaleH, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DetectionPostProcessDescriptor.endDetectionPostProcessDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} maxDetections + * @param {number} maxClassesPerDetection + * @param {number} detectionsPerClass + * @param {number} nmsScoreThreshold + * @param {number} nmsIouThreshold + * @param {number} numClasses + * @param {boolean} useRegularNms + * @param {number} scaleX + * @param {number} scaleY + * @param {number} scaleW + * @param {number} scaleH + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DetectionPostProcessDescriptor.createDetectionPostProcessDescriptor = function(builder, maxDetections, maxClassesPerDetection, detectionsPerClass, nmsScoreThreshold, nmsIouThreshold, 
numClasses, useRegularNms, scaleX, scaleY, scaleW, scaleH) { + armnnSerializer.DetectionPostProcessDescriptor.startDetectionPostProcessDescriptor(builder); + armnnSerializer.DetectionPostProcessDescriptor.addMaxDetections(builder, maxDetections); + armnnSerializer.DetectionPostProcessDescriptor.addMaxClassesPerDetection(builder, maxClassesPerDetection); + armnnSerializer.DetectionPostProcessDescriptor.addDetectionsPerClass(builder, detectionsPerClass); + armnnSerializer.DetectionPostProcessDescriptor.addNmsScoreThreshold(builder, nmsScoreThreshold); + armnnSerializer.DetectionPostProcessDescriptor.addNmsIouThreshold(builder, nmsIouThreshold); + armnnSerializer.DetectionPostProcessDescriptor.addNumClasses(builder, numClasses); + armnnSerializer.DetectionPostProcessDescriptor.addUseRegularNms(builder, useRegularNms); + armnnSerializer.DetectionPostProcessDescriptor.addScaleX(builder, scaleX); + armnnSerializer.DetectionPostProcessDescriptor.addScaleY(builder, scaleY); + armnnSerializer.DetectionPostProcessDescriptor.addScaleW(builder, scaleW); + armnnSerializer.DetectionPostProcessDescriptor.addScaleH(builder, scaleH); + return armnnSerializer.DetectionPostProcessDescriptor.endDetectionPostProcessDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.LstmInputParams = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.LstmInputParams} + */ +armnnSerializer.LstmInputParams.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LstmInputParams=} obj + * @returns {armnnSerializer.LstmInputParams} + */ +armnnSerializer.LstmInputParams.getRootAsLstmInputParams = function(bb, obj) { + return (obj || new armnnSerializer.LstmInputParams).__init(bb.readInt32(bb.position()) + 
bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LstmInputParams=} obj + * @returns {armnnSerializer.LstmInputParams} + */ +armnnSerializer.LstmInputParams.getSizePrefixedRootAsLstmInputParams = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.LstmInputParams).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.inputToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.inputToCellWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.inputToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.recurrentToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.recurrentToCellWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.recurrentToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.forgetGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.cellBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.outputGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.inputToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.recurrentToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.cellToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.inputGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 28); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.projectionWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 30); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.projectionBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 32); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.cellToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 34); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.cellToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 36); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.inputLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 38); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.forgetLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 40); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.cellLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 42); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.LstmInputParams.prototype.outputLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 44); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.LstmInputParams.startLstmInputParams = function(builder) { + builder.startObject(21); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToForgetWeightsOffset + */ +armnnSerializer.LstmInputParams.addInputToForgetWeights = function(builder, inputToForgetWeightsOffset) { + builder.addFieldOffset(0, inputToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToCellWeightsOffset + */ +armnnSerializer.LstmInputParams.addInputToCellWeights = function(builder, inputToCellWeightsOffset) { + builder.addFieldOffset(1, inputToCellWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToOutputWeightsOffset + */ +armnnSerializer.LstmInputParams.addInputToOutputWeights = function(builder, inputToOutputWeightsOffset) { + builder.addFieldOffset(2, inputToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToForgetWeightsOffset + */ 
+armnnSerializer.LstmInputParams.addRecurrentToForgetWeights = function(builder, recurrentToForgetWeightsOffset) { + builder.addFieldOffset(3, recurrentToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToCellWeightsOffset + */ +armnnSerializer.LstmInputParams.addRecurrentToCellWeights = function(builder, recurrentToCellWeightsOffset) { + builder.addFieldOffset(4, recurrentToCellWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToOutputWeightsOffset + */ +armnnSerializer.LstmInputParams.addRecurrentToOutputWeights = function(builder, recurrentToOutputWeightsOffset) { + builder.addFieldOffset(5, recurrentToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} forgetGateBiasOffset + */ +armnnSerializer.LstmInputParams.addForgetGateBias = function(builder, forgetGateBiasOffset) { + builder.addFieldOffset(6, forgetGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellBiasOffset + */ +armnnSerializer.LstmInputParams.addCellBias = function(builder, cellBiasOffset) { + builder.addFieldOffset(7, cellBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputGateBiasOffset + */ +armnnSerializer.LstmInputParams.addOutputGateBias = function(builder, outputGateBiasOffset) { + builder.addFieldOffset(8, outputGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToInputWeightsOffset + */ +armnnSerializer.LstmInputParams.addInputToInputWeights = function(builder, inputToInputWeightsOffset) { + builder.addFieldOffset(9, inputToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToInputWeightsOffset + */ +armnnSerializer.LstmInputParams.addRecurrentToInputWeights = function(builder, 
recurrentToInputWeightsOffset) { + builder.addFieldOffset(10, recurrentToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellToInputWeightsOffset + */ +armnnSerializer.LstmInputParams.addCellToInputWeights = function(builder, cellToInputWeightsOffset) { + builder.addFieldOffset(11, cellToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputGateBiasOffset + */ +armnnSerializer.LstmInputParams.addInputGateBias = function(builder, inputGateBiasOffset) { + builder.addFieldOffset(12, inputGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} projectionWeightsOffset + */ +armnnSerializer.LstmInputParams.addProjectionWeights = function(builder, projectionWeightsOffset) { + builder.addFieldOffset(13, projectionWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} projectionBiasOffset + */ +armnnSerializer.LstmInputParams.addProjectionBias = function(builder, projectionBiasOffset) { + builder.addFieldOffset(14, projectionBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellToForgetWeightsOffset + */ +armnnSerializer.LstmInputParams.addCellToForgetWeights = function(builder, cellToForgetWeightsOffset) { + builder.addFieldOffset(15, cellToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellToOutputWeightsOffset + */ +armnnSerializer.LstmInputParams.addCellToOutputWeights = function(builder, cellToOutputWeightsOffset) { + builder.addFieldOffset(16, cellToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputLayerNormWeightsOffset + */ +armnnSerializer.LstmInputParams.addInputLayerNormWeights = function(builder, inputLayerNormWeightsOffset) { + builder.addFieldOffset(17, 
inputLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} forgetLayerNormWeightsOffset + */ +armnnSerializer.LstmInputParams.addForgetLayerNormWeights = function(builder, forgetLayerNormWeightsOffset) { + builder.addFieldOffset(18, forgetLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellLayerNormWeightsOffset + */ +armnnSerializer.LstmInputParams.addCellLayerNormWeights = function(builder, cellLayerNormWeightsOffset) { + builder.addFieldOffset(19, cellLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputLayerNormWeightsOffset + */ +armnnSerializer.LstmInputParams.addOutputLayerNormWeights = function(builder, outputLayerNormWeightsOffset) { + builder.addFieldOffset(20, outputLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LstmInputParams.endLstmInputParams = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToForgetWeightsOffset + * @param {flatbuffers.Offset} inputToCellWeightsOffset + * @param {flatbuffers.Offset} inputToOutputWeightsOffset + * @param {flatbuffers.Offset} recurrentToForgetWeightsOffset + * @param {flatbuffers.Offset} recurrentToCellWeightsOffset + * @param {flatbuffers.Offset} recurrentToOutputWeightsOffset + * @param {flatbuffers.Offset} forgetGateBiasOffset + * @param {flatbuffers.Offset} cellBiasOffset + * @param {flatbuffers.Offset} outputGateBiasOffset + * @param {flatbuffers.Offset} inputToInputWeightsOffset + * @param {flatbuffers.Offset} recurrentToInputWeightsOffset + * @param {flatbuffers.Offset} cellToInputWeightsOffset + * @param {flatbuffers.Offset} inputGateBiasOffset + * @param {flatbuffers.Offset} projectionWeightsOffset + * @param 
{flatbuffers.Offset} projectionBiasOffset + * @param {flatbuffers.Offset} cellToForgetWeightsOffset + * @param {flatbuffers.Offset} cellToOutputWeightsOffset + * @param {flatbuffers.Offset} inputLayerNormWeightsOffset + * @param {flatbuffers.Offset} forgetLayerNormWeightsOffset + * @param {flatbuffers.Offset} cellLayerNormWeightsOffset + * @param {flatbuffers.Offset} outputLayerNormWeightsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LstmInputParams.createLstmInputParams = function(builder, inputToForgetWeightsOffset, inputToCellWeightsOffset, inputToOutputWeightsOffset, recurrentToForgetWeightsOffset, recurrentToCellWeightsOffset, recurrentToOutputWeightsOffset, forgetGateBiasOffset, cellBiasOffset, outputGateBiasOffset, inputToInputWeightsOffset, recurrentToInputWeightsOffset, cellToInputWeightsOffset, inputGateBiasOffset, projectionWeightsOffset, projectionBiasOffset, cellToForgetWeightsOffset, cellToOutputWeightsOffset, inputLayerNormWeightsOffset, forgetLayerNormWeightsOffset, cellLayerNormWeightsOffset, outputLayerNormWeightsOffset) { + armnnSerializer.LstmInputParams.startLstmInputParams(builder); + armnnSerializer.LstmInputParams.addInputToForgetWeights(builder, inputToForgetWeightsOffset); + armnnSerializer.LstmInputParams.addInputToCellWeights(builder, inputToCellWeightsOffset); + armnnSerializer.LstmInputParams.addInputToOutputWeights(builder, inputToOutputWeightsOffset); + armnnSerializer.LstmInputParams.addRecurrentToForgetWeights(builder, recurrentToForgetWeightsOffset); + armnnSerializer.LstmInputParams.addRecurrentToCellWeights(builder, recurrentToCellWeightsOffset); + armnnSerializer.LstmInputParams.addRecurrentToOutputWeights(builder, recurrentToOutputWeightsOffset); + armnnSerializer.LstmInputParams.addForgetGateBias(builder, forgetGateBiasOffset); + armnnSerializer.LstmInputParams.addCellBias(builder, cellBiasOffset); + armnnSerializer.LstmInputParams.addOutputGateBias(builder, outputGateBiasOffset); + 
armnnSerializer.LstmInputParams.addInputToInputWeights(builder, inputToInputWeightsOffset); + armnnSerializer.LstmInputParams.addRecurrentToInputWeights(builder, recurrentToInputWeightsOffset); + armnnSerializer.LstmInputParams.addCellToInputWeights(builder, cellToInputWeightsOffset); + armnnSerializer.LstmInputParams.addInputGateBias(builder, inputGateBiasOffset); + armnnSerializer.LstmInputParams.addProjectionWeights(builder, projectionWeightsOffset); + armnnSerializer.LstmInputParams.addProjectionBias(builder, projectionBiasOffset); + armnnSerializer.LstmInputParams.addCellToForgetWeights(builder, cellToForgetWeightsOffset); + armnnSerializer.LstmInputParams.addCellToOutputWeights(builder, cellToOutputWeightsOffset); + armnnSerializer.LstmInputParams.addInputLayerNormWeights(builder, inputLayerNormWeightsOffset); + armnnSerializer.LstmInputParams.addForgetLayerNormWeights(builder, forgetLayerNormWeightsOffset); + armnnSerializer.LstmInputParams.addCellLayerNormWeights(builder, cellLayerNormWeightsOffset); + armnnSerializer.LstmInputParams.addOutputLayerNormWeights(builder, outputLayerNormWeightsOffset); + return armnnSerializer.LstmInputParams.endLstmInputParams(builder); +} + +/** + * @constructor + */ +armnnSerializer.LstmDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.LstmDescriptor} + */ +armnnSerializer.LstmDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LstmDescriptor=} obj + * @returns {armnnSerializer.LstmDescriptor} + */ +armnnSerializer.LstmDescriptor.getRootAsLstmDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.LstmDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LstmDescriptor=} obj + * @returns {armnnSerializer.LstmDescriptor} + */ +armnnSerializer.LstmDescriptor.getSizePrefixedRootAsLstmDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.LstmDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.LstmDescriptor.prototype.activationFunc = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.LstmDescriptor.prototype.clippingThresCell = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.LstmDescriptor.prototype.clippingThresProj = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.LstmDescriptor.prototype.cifgEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : true; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.LstmDescriptor.prototype.peepholeEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.LstmDescriptor.prototype.projectionEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.LstmDescriptor.prototype.layerNormEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.LstmDescriptor.startLstmDescriptor = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} activationFunc + */ +armnnSerializer.LstmDescriptor.addActivationFunc = function(builder, activationFunc) { + builder.addFieldInt32(0, activationFunc, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} clippingThresCell + */ +armnnSerializer.LstmDescriptor.addClippingThresCell = function(builder, clippingThresCell) { + builder.addFieldFloat32(1, clippingThresCell, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} clippingThresProj + */ +armnnSerializer.LstmDescriptor.addClippingThresProj = function(builder, clippingThresProj) { + builder.addFieldFloat32(2, clippingThresProj, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} cifgEnabled + */ +armnnSerializer.LstmDescriptor.addCifgEnabled = function(builder, cifgEnabled) { + builder.addFieldInt8(3, +cifgEnabled, +true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} peepholeEnabled + */ +armnnSerializer.LstmDescriptor.addPeepholeEnabled = function(builder, peepholeEnabled) { + builder.addFieldInt8(4, +peepholeEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} projectionEnabled + */ +armnnSerializer.LstmDescriptor.addProjectionEnabled = function(builder, projectionEnabled) { + builder.addFieldInt8(5, +projectionEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} layerNormEnabled + */ +armnnSerializer.LstmDescriptor.addLayerNormEnabled = function(builder, layerNormEnabled) { + builder.addFieldInt8(6, +layerNormEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ 
+armnnSerializer.LstmDescriptor.endLstmDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} activationFunc + * @param {number} clippingThresCell + * @param {number} clippingThresProj + * @param {boolean} cifgEnabled + * @param {boolean} peepholeEnabled + * @param {boolean} projectionEnabled + * @param {boolean} layerNormEnabled + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LstmDescriptor.createLstmDescriptor = function(builder, activationFunc, clippingThresCell, clippingThresProj, cifgEnabled, peepholeEnabled, projectionEnabled, layerNormEnabled) { + armnnSerializer.LstmDescriptor.startLstmDescriptor(builder); + armnnSerializer.LstmDescriptor.addActivationFunc(builder, activationFunc); + armnnSerializer.LstmDescriptor.addClippingThresCell(builder, clippingThresCell); + armnnSerializer.LstmDescriptor.addClippingThresProj(builder, clippingThresProj); + armnnSerializer.LstmDescriptor.addCifgEnabled(builder, cifgEnabled); + armnnSerializer.LstmDescriptor.addPeepholeEnabled(builder, peepholeEnabled); + armnnSerializer.LstmDescriptor.addProjectionEnabled(builder, projectionEnabled); + armnnSerializer.LstmDescriptor.addLayerNormEnabled(builder, layerNormEnabled); + return armnnSerializer.LstmDescriptor.endLstmDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.LstmLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.LstmLayer} + */ +armnnSerializer.LstmLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LstmLayer=} obj + * @returns {armnnSerializer.LstmLayer} + */ +armnnSerializer.LstmLayer.getRootAsLstmLayer = function(bb, obj) { + return 
(obj || new armnnSerializer.LstmLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.LstmLayer=} obj + * @returns {armnnSerializer.LstmLayer} + */ +armnnSerializer.LstmLayer.getSizePrefixedRootAsLstmLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.LstmLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.LstmLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.LstmDescriptor=} obj + * @returns {armnnSerializer.LstmDescriptor|null} + */ +armnnSerializer.LstmLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.LstmDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.LstmInputParams=} obj + * @returns {armnnSerializer.LstmInputParams|null} + */ +armnnSerializer.LstmLayer.prototype.inputParams = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
(obj || new armnnSerializer.LstmInputParams).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.LstmLayer.startLstmLayer = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.LstmLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.LstmLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputParamsOffset + */ +armnnSerializer.LstmLayer.addInputParams = function(builder, inputParamsOffset) { + builder.addFieldOffset(2, inputParamsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LstmLayer.endLstmLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} inputParamsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.LstmLayer.createLstmLayer = function(builder, baseOffset, descriptorOffset, inputParamsOffset) { + armnnSerializer.LstmLayer.startLstmLayer(builder); + armnnSerializer.LstmLayer.addBase(builder, baseOffset); + armnnSerializer.LstmLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.LstmLayer.addInputParams(builder, inputParamsOffset); + return armnnSerializer.LstmLayer.endLstmLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.QLstmInputParams = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + 
this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.QLstmInputParams} + */ +armnnSerializer.QLstmInputParams.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QLstmInputParams=} obj + * @returns {armnnSerializer.QLstmInputParams} + */ +armnnSerializer.QLstmInputParams.getRootAsQLstmInputParams = function(bb, obj) { + return (obj || new armnnSerializer.QLstmInputParams).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QLstmInputParams=} obj + * @returns {armnnSerializer.QLstmInputParams} + */ +armnnSerializer.QLstmInputParams.getSizePrefixedRootAsQLstmInputParams = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.QLstmInputParams).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.inputToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.inputToCellWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.inputToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.recurrentToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.recurrentToCellWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.recurrentToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.forgetGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.cellBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.outputGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.inputToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.recurrentToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.inputGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.projectionWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 28); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.projectionBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 30); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.cellToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 32); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.cellToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 34); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.cellToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 36); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.inputLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 38); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.forgetLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 40); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.cellLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 42); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QLstmInputParams.prototype.outputLayerNormWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 44); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.QLstmInputParams.startQLstmInputParams = function(builder) { + builder.startObject(21); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToForgetWeightsOffset + */ +armnnSerializer.QLstmInputParams.addInputToForgetWeights = function(builder, inputToForgetWeightsOffset) { + builder.addFieldOffset(0, inputToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToCellWeightsOffset + */ +armnnSerializer.QLstmInputParams.addInputToCellWeights = function(builder, inputToCellWeightsOffset) { + builder.addFieldOffset(1, inputToCellWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToOutputWeightsOffset + */ +armnnSerializer.QLstmInputParams.addInputToOutputWeights = function(builder, inputToOutputWeightsOffset) { + builder.addFieldOffset(2, inputToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToForgetWeightsOffset + */ +armnnSerializer.QLstmInputParams.addRecurrentToForgetWeights = function(builder, recurrentToForgetWeightsOffset) { + builder.addFieldOffset(3, recurrentToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToCellWeightsOffset + */ +armnnSerializer.QLstmInputParams.addRecurrentToCellWeights = function(builder, recurrentToCellWeightsOffset) { + builder.addFieldOffset(4, recurrentToCellWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToOutputWeightsOffset + */ +armnnSerializer.QLstmInputParams.addRecurrentToOutputWeights = function(builder, recurrentToOutputWeightsOffset) { + builder.addFieldOffset(5, 
recurrentToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} forgetGateBiasOffset + */ +armnnSerializer.QLstmInputParams.addForgetGateBias = function(builder, forgetGateBiasOffset) { + builder.addFieldOffset(6, forgetGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellBiasOffset + */ +armnnSerializer.QLstmInputParams.addCellBias = function(builder, cellBiasOffset) { + builder.addFieldOffset(7, cellBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputGateBiasOffset + */ +armnnSerializer.QLstmInputParams.addOutputGateBias = function(builder, outputGateBiasOffset) { + builder.addFieldOffset(8, outputGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToInputWeightsOffset + */ +armnnSerializer.QLstmInputParams.addInputToInputWeights = function(builder, inputToInputWeightsOffset) { + builder.addFieldOffset(9, inputToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToInputWeightsOffset + */ +armnnSerializer.QLstmInputParams.addRecurrentToInputWeights = function(builder, recurrentToInputWeightsOffset) { + builder.addFieldOffset(10, recurrentToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputGateBiasOffset + */ +armnnSerializer.QLstmInputParams.addInputGateBias = function(builder, inputGateBiasOffset) { + builder.addFieldOffset(11, inputGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} projectionWeightsOffset + */ +armnnSerializer.QLstmInputParams.addProjectionWeights = function(builder, projectionWeightsOffset) { + builder.addFieldOffset(12, projectionWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} 
projectionBiasOffset + */ +armnnSerializer.QLstmInputParams.addProjectionBias = function(builder, projectionBiasOffset) { + builder.addFieldOffset(13, projectionBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellToInputWeightsOffset + */ +armnnSerializer.QLstmInputParams.addCellToInputWeights = function(builder, cellToInputWeightsOffset) { + builder.addFieldOffset(14, cellToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellToForgetWeightsOffset + */ +armnnSerializer.QLstmInputParams.addCellToForgetWeights = function(builder, cellToForgetWeightsOffset) { + builder.addFieldOffset(15, cellToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellToOutputWeightsOffset + */ +armnnSerializer.QLstmInputParams.addCellToOutputWeights = function(builder, cellToOutputWeightsOffset) { + builder.addFieldOffset(16, cellToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputLayerNormWeightsOffset + */ +armnnSerializer.QLstmInputParams.addInputLayerNormWeights = function(builder, inputLayerNormWeightsOffset) { + builder.addFieldOffset(17, inputLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} forgetLayerNormWeightsOffset + */ +armnnSerializer.QLstmInputParams.addForgetLayerNormWeights = function(builder, forgetLayerNormWeightsOffset) { + builder.addFieldOffset(18, forgetLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellLayerNormWeightsOffset + */ +armnnSerializer.QLstmInputParams.addCellLayerNormWeights = function(builder, cellLayerNormWeightsOffset) { + builder.addFieldOffset(19, cellLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputLayerNormWeightsOffset + 
*/ +armnnSerializer.QLstmInputParams.addOutputLayerNormWeights = function(builder, outputLayerNormWeightsOffset) { + builder.addFieldOffset(20, outputLayerNormWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QLstmInputParams.endQLstmInputParams = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToForgetWeightsOffset + * @param {flatbuffers.Offset} inputToCellWeightsOffset + * @param {flatbuffers.Offset} inputToOutputWeightsOffset + * @param {flatbuffers.Offset} recurrentToForgetWeightsOffset + * @param {flatbuffers.Offset} recurrentToCellWeightsOffset + * @param {flatbuffers.Offset} recurrentToOutputWeightsOffset + * @param {flatbuffers.Offset} forgetGateBiasOffset + * @param {flatbuffers.Offset} cellBiasOffset + * @param {flatbuffers.Offset} outputGateBiasOffset + * @param {flatbuffers.Offset} inputToInputWeightsOffset + * @param {flatbuffers.Offset} recurrentToInputWeightsOffset + * @param {flatbuffers.Offset} inputGateBiasOffset + * @param {flatbuffers.Offset} projectionWeightsOffset + * @param {flatbuffers.Offset} projectionBiasOffset + * @param {flatbuffers.Offset} cellToInputWeightsOffset + * @param {flatbuffers.Offset} cellToForgetWeightsOffset + * @param {flatbuffers.Offset} cellToOutputWeightsOffset + * @param {flatbuffers.Offset} inputLayerNormWeightsOffset + * @param {flatbuffers.Offset} forgetLayerNormWeightsOffset + * @param {flatbuffers.Offset} cellLayerNormWeightsOffset + * @param {flatbuffers.Offset} outputLayerNormWeightsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QLstmInputParams.createQLstmInputParams = function(builder, inputToForgetWeightsOffset, inputToCellWeightsOffset, inputToOutputWeightsOffset, recurrentToForgetWeightsOffset, recurrentToCellWeightsOffset, recurrentToOutputWeightsOffset, forgetGateBiasOffset, cellBiasOffset, 
outputGateBiasOffset, inputToInputWeightsOffset, recurrentToInputWeightsOffset, inputGateBiasOffset, projectionWeightsOffset, projectionBiasOffset, cellToInputWeightsOffset, cellToForgetWeightsOffset, cellToOutputWeightsOffset, inputLayerNormWeightsOffset, forgetLayerNormWeightsOffset, cellLayerNormWeightsOffset, outputLayerNormWeightsOffset) { + armnnSerializer.QLstmInputParams.startQLstmInputParams(builder); + armnnSerializer.QLstmInputParams.addInputToForgetWeights(builder, inputToForgetWeightsOffset); + armnnSerializer.QLstmInputParams.addInputToCellWeights(builder, inputToCellWeightsOffset); + armnnSerializer.QLstmInputParams.addInputToOutputWeights(builder, inputToOutputWeightsOffset); + armnnSerializer.QLstmInputParams.addRecurrentToForgetWeights(builder, recurrentToForgetWeightsOffset); + armnnSerializer.QLstmInputParams.addRecurrentToCellWeights(builder, recurrentToCellWeightsOffset); + armnnSerializer.QLstmInputParams.addRecurrentToOutputWeights(builder, recurrentToOutputWeightsOffset); + armnnSerializer.QLstmInputParams.addForgetGateBias(builder, forgetGateBiasOffset); + armnnSerializer.QLstmInputParams.addCellBias(builder, cellBiasOffset); + armnnSerializer.QLstmInputParams.addOutputGateBias(builder, outputGateBiasOffset); + armnnSerializer.QLstmInputParams.addInputToInputWeights(builder, inputToInputWeightsOffset); + armnnSerializer.QLstmInputParams.addRecurrentToInputWeights(builder, recurrentToInputWeightsOffset); + armnnSerializer.QLstmInputParams.addInputGateBias(builder, inputGateBiasOffset); + armnnSerializer.QLstmInputParams.addProjectionWeights(builder, projectionWeightsOffset); + armnnSerializer.QLstmInputParams.addProjectionBias(builder, projectionBiasOffset); + armnnSerializer.QLstmInputParams.addCellToInputWeights(builder, cellToInputWeightsOffset); + armnnSerializer.QLstmInputParams.addCellToForgetWeights(builder, cellToForgetWeightsOffset); + armnnSerializer.QLstmInputParams.addCellToOutputWeights(builder, cellToOutputWeightsOffset); + 
armnnSerializer.QLstmInputParams.addInputLayerNormWeights(builder, inputLayerNormWeightsOffset); + armnnSerializer.QLstmInputParams.addForgetLayerNormWeights(builder, forgetLayerNormWeightsOffset); + armnnSerializer.QLstmInputParams.addCellLayerNormWeights(builder, cellLayerNormWeightsOffset); + armnnSerializer.QLstmInputParams.addOutputLayerNormWeights(builder, outputLayerNormWeightsOffset); + return armnnSerializer.QLstmInputParams.endQLstmInputParams(builder); +} + +/** + * @constructor + */ +armnnSerializer.QLstmDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.QLstmDescriptor} + */ +armnnSerializer.QLstmDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QLstmDescriptor=} obj + * @returns {armnnSerializer.QLstmDescriptor} + */ +armnnSerializer.QLstmDescriptor.getRootAsQLstmDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.QLstmDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QLstmDescriptor=} obj + * @returns {armnnSerializer.QLstmDescriptor} + */ +armnnSerializer.QLstmDescriptor.getSizePrefixedRootAsQLstmDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.QLstmDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +armnnSerializer.QLstmDescriptor.prototype.cifgEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : true; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.QLstmDescriptor.prototype.peepholeEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.QLstmDescriptor.prototype.projectionEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.QLstmDescriptor.prototype.layerNormEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.cellClip = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.projectionClip = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.inputIntermediateScale = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.forgetIntermediateScale = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.cellIntermediateScale = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.outputIntermediateScale = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.hiddenStateZeroPoint = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.QLstmDescriptor.prototype.hiddenStateScale = function() { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.QLstmDescriptor.startQLstmDescriptor = function(builder) { + builder.startObject(12); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} cifgEnabled + */ +armnnSerializer.QLstmDescriptor.addCifgEnabled = function(builder, cifgEnabled) { + builder.addFieldInt8(0, +cifgEnabled, +true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} peepholeEnabled + */ +armnnSerializer.QLstmDescriptor.addPeepholeEnabled = function(builder, peepholeEnabled) { + builder.addFieldInt8(1, +peepholeEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} projectionEnabled + */ +armnnSerializer.QLstmDescriptor.addProjectionEnabled = function(builder, projectionEnabled) { + builder.addFieldInt8(2, +projectionEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} layerNormEnabled + */ +armnnSerializer.QLstmDescriptor.addLayerNormEnabled = function(builder, layerNormEnabled) { + builder.addFieldInt8(3, +layerNormEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} cellClip + */ +armnnSerializer.QLstmDescriptor.addCellClip = 
function(builder, cellClip) { + builder.addFieldFloat32(4, cellClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} projectionClip + */ +armnnSerializer.QLstmDescriptor.addProjectionClip = function(builder, projectionClip) { + builder.addFieldFloat32(5, projectionClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} inputIntermediateScale + */ +armnnSerializer.QLstmDescriptor.addInputIntermediateScale = function(builder, inputIntermediateScale) { + builder.addFieldFloat32(6, inputIntermediateScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} forgetIntermediateScale + */ +armnnSerializer.QLstmDescriptor.addForgetIntermediateScale = function(builder, forgetIntermediateScale) { + builder.addFieldFloat32(7, forgetIntermediateScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} cellIntermediateScale + */ +armnnSerializer.QLstmDescriptor.addCellIntermediateScale = function(builder, cellIntermediateScale) { + builder.addFieldFloat32(8, cellIntermediateScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputIntermediateScale + */ +armnnSerializer.QLstmDescriptor.addOutputIntermediateScale = function(builder, outputIntermediateScale) { + builder.addFieldFloat32(9, outputIntermediateScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} hiddenStateZeroPoint + */ +armnnSerializer.QLstmDescriptor.addHiddenStateZeroPoint = function(builder, hiddenStateZeroPoint) { + builder.addFieldInt32(10, hiddenStateZeroPoint, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} hiddenStateScale + */ +armnnSerializer.QLstmDescriptor.addHiddenStateScale = function(builder, hiddenStateScale) { + builder.addFieldFloat32(11, hiddenStateScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ 
+armnnSerializer.QLstmDescriptor.endQLstmDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} cifgEnabled + * @param {boolean} peepholeEnabled + * @param {boolean} projectionEnabled + * @param {boolean} layerNormEnabled + * @param {number} cellClip + * @param {number} projectionClip + * @param {number} inputIntermediateScale + * @param {number} forgetIntermediateScale + * @param {number} cellIntermediateScale + * @param {number} outputIntermediateScale + * @param {number} hiddenStateZeroPoint + * @param {number} hiddenStateScale + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QLstmDescriptor.createQLstmDescriptor = function(builder, cifgEnabled, peepholeEnabled, projectionEnabled, layerNormEnabled, cellClip, projectionClip, inputIntermediateScale, forgetIntermediateScale, cellIntermediateScale, outputIntermediateScale, hiddenStateZeroPoint, hiddenStateScale) { + armnnSerializer.QLstmDescriptor.startQLstmDescriptor(builder); + armnnSerializer.QLstmDescriptor.addCifgEnabled(builder, cifgEnabled); + armnnSerializer.QLstmDescriptor.addPeepholeEnabled(builder, peepholeEnabled); + armnnSerializer.QLstmDescriptor.addProjectionEnabled(builder, projectionEnabled); + armnnSerializer.QLstmDescriptor.addLayerNormEnabled(builder, layerNormEnabled); + armnnSerializer.QLstmDescriptor.addCellClip(builder, cellClip); + armnnSerializer.QLstmDescriptor.addProjectionClip(builder, projectionClip); + armnnSerializer.QLstmDescriptor.addInputIntermediateScale(builder, inputIntermediateScale); + armnnSerializer.QLstmDescriptor.addForgetIntermediateScale(builder, forgetIntermediateScale); + armnnSerializer.QLstmDescriptor.addCellIntermediateScale(builder, cellIntermediateScale); + armnnSerializer.QLstmDescriptor.addOutputIntermediateScale(builder, outputIntermediateScale); + armnnSerializer.QLstmDescriptor.addHiddenStateZeroPoint(builder, hiddenStateZeroPoint); + 
armnnSerializer.QLstmDescriptor.addHiddenStateScale(builder, hiddenStateScale); + return armnnSerializer.QLstmDescriptor.endQLstmDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.QLstmLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.QLstmLayer} + */ +armnnSerializer.QLstmLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QLstmLayer=} obj + * @returns {armnnSerializer.QLstmLayer} + */ +armnnSerializer.QLstmLayer.getRootAsQLstmLayer = function(bb, obj) { + return (obj || new armnnSerializer.QLstmLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QLstmLayer=} obj + * @returns {armnnSerializer.QLstmLayer} + */ +armnnSerializer.QLstmLayer.getSizePrefixedRootAsQLstmLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.QLstmLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.QLstmLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.QLstmDescriptor=} obj + * @returns {armnnSerializer.QLstmDescriptor|null} + */ +armnnSerializer.QLstmLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.QLstmDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.QLstmInputParams=} obj + * @returns {armnnSerializer.QLstmInputParams|null} + */ +armnnSerializer.QLstmLayer.prototype.inputParams = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new armnnSerializer.QLstmInputParams).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.QLstmLayer.startQLstmLayer = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.QLstmLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.QLstmLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputParamsOffset + */ +armnnSerializer.QLstmLayer.addInputParams = function(builder, inputParamsOffset) { + builder.addFieldOffset(2, inputParamsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QLstmLayer.endQLstmLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} inputParamsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QLstmLayer.createQLstmLayer = function(builder, baseOffset, descriptorOffset, inputParamsOffset) { + armnnSerializer.QLstmLayer.startQLstmLayer(builder); + armnnSerializer.QLstmLayer.addBase(builder, baseOffset); + 
armnnSerializer.QLstmLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.QLstmLayer.addInputParams(builder, inputParamsOffset); + return armnnSerializer.QLstmLayer.endQLstmLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.QuantizedLstmInputParams = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.QuantizedLstmInputParams} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QuantizedLstmInputParams=} obj + * @returns {armnnSerializer.QuantizedLstmInputParams} + */ +armnnSerializer.QuantizedLstmInputParams.getRootAsQuantizedLstmInputParams = function(bb, obj) { + return (obj || new armnnSerializer.QuantizedLstmInputParams).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QuantizedLstmInputParams=} obj + * @returns {armnnSerializer.QuantizedLstmInputParams} + */ +armnnSerializer.QuantizedLstmInputParams.getSizePrefixedRootAsQuantizedLstmInputParams = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.QuantizedLstmInputParams).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.inputToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.inputToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.inputToCellWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.inputToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.recurrentToInputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.recurrentToForgetWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.recurrentToCellWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.recurrentToOutputWeights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.inputGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.forgetGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.cellBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.QuantizedLstmInputParams.prototype.outputGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.QuantizedLstmInputParams.startQuantizedLstmInputParams = function(builder) { + builder.startObject(12); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToInputWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addInputToInputWeights = function(builder, inputToInputWeightsOffset) { + builder.addFieldOffset(0, inputToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToForgetWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addInputToForgetWeights = function(builder, inputToForgetWeightsOffset) { + builder.addFieldOffset(1, inputToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToCellWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addInputToCellWeights = function(builder, inputToCellWeightsOffset) { + builder.addFieldOffset(2, inputToCellWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToOutputWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addInputToOutputWeights = function(builder, inputToOutputWeightsOffset) { + builder.addFieldOffset(3, inputToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToInputWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addRecurrentToInputWeights 
= function(builder, recurrentToInputWeightsOffset) { + builder.addFieldOffset(4, recurrentToInputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToForgetWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addRecurrentToForgetWeights = function(builder, recurrentToForgetWeightsOffset) { + builder.addFieldOffset(5, recurrentToForgetWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToCellWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addRecurrentToCellWeights = function(builder, recurrentToCellWeightsOffset) { + builder.addFieldOffset(6, recurrentToCellWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} recurrentToOutputWeightsOffset + */ +armnnSerializer.QuantizedLstmInputParams.addRecurrentToOutputWeights = function(builder, recurrentToOutputWeightsOffset) { + builder.addFieldOffset(7, recurrentToOutputWeightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputGateBiasOffset + */ +armnnSerializer.QuantizedLstmInputParams.addInputGateBias = function(builder, inputGateBiasOffset) { + builder.addFieldOffset(8, inputGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} forgetGateBiasOffset + */ +armnnSerializer.QuantizedLstmInputParams.addForgetGateBias = function(builder, forgetGateBiasOffset) { + builder.addFieldOffset(9, forgetGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} cellBiasOffset + */ +armnnSerializer.QuantizedLstmInputParams.addCellBias = function(builder, cellBiasOffset) { + builder.addFieldOffset(10, cellBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputGateBiasOffset + */ +armnnSerializer.QuantizedLstmInputParams.addOutputGateBias = function(builder, 
outputGateBiasOffset) { + builder.addFieldOffset(11, outputGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QuantizedLstmInputParams.endQuantizedLstmInputParams = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputToInputWeightsOffset + * @param {flatbuffers.Offset} inputToForgetWeightsOffset + * @param {flatbuffers.Offset} inputToCellWeightsOffset + * @param {flatbuffers.Offset} inputToOutputWeightsOffset + * @param {flatbuffers.Offset} recurrentToInputWeightsOffset + * @param {flatbuffers.Offset} recurrentToForgetWeightsOffset + * @param {flatbuffers.Offset} recurrentToCellWeightsOffset + * @param {flatbuffers.Offset} recurrentToOutputWeightsOffset + * @param {flatbuffers.Offset} inputGateBiasOffset + * @param {flatbuffers.Offset} forgetGateBiasOffset + * @param {flatbuffers.Offset} cellBiasOffset + * @param {flatbuffers.Offset} outputGateBiasOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QuantizedLstmInputParams.createQuantizedLstmInputParams = function(builder, inputToInputWeightsOffset, inputToForgetWeightsOffset, inputToCellWeightsOffset, inputToOutputWeightsOffset, recurrentToInputWeightsOffset, recurrentToForgetWeightsOffset, recurrentToCellWeightsOffset, recurrentToOutputWeightsOffset, inputGateBiasOffset, forgetGateBiasOffset, cellBiasOffset, outputGateBiasOffset) { + armnnSerializer.QuantizedLstmInputParams.startQuantizedLstmInputParams(builder); + armnnSerializer.QuantizedLstmInputParams.addInputToInputWeights(builder, inputToInputWeightsOffset); + armnnSerializer.QuantizedLstmInputParams.addInputToForgetWeights(builder, inputToForgetWeightsOffset); + armnnSerializer.QuantizedLstmInputParams.addInputToCellWeights(builder, inputToCellWeightsOffset); + armnnSerializer.QuantizedLstmInputParams.addInputToOutputWeights(builder, inputToOutputWeightsOffset); 
+ armnnSerializer.QuantizedLstmInputParams.addRecurrentToInputWeights(builder, recurrentToInputWeightsOffset); + armnnSerializer.QuantizedLstmInputParams.addRecurrentToForgetWeights(builder, recurrentToForgetWeightsOffset); + armnnSerializer.QuantizedLstmInputParams.addRecurrentToCellWeights(builder, recurrentToCellWeightsOffset); + armnnSerializer.QuantizedLstmInputParams.addRecurrentToOutputWeights(builder, recurrentToOutputWeightsOffset); + armnnSerializer.QuantizedLstmInputParams.addInputGateBias(builder, inputGateBiasOffset); + armnnSerializer.QuantizedLstmInputParams.addForgetGateBias(builder, forgetGateBiasOffset); + armnnSerializer.QuantizedLstmInputParams.addCellBias(builder, cellBiasOffset); + armnnSerializer.QuantizedLstmInputParams.addOutputGateBias(builder, outputGateBiasOffset); + return armnnSerializer.QuantizedLstmInputParams.endQuantizedLstmInputParams(builder); +} + +/** + * @constructor + */ +armnnSerializer.QuantizedLstmLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.QuantizedLstmLayer} + */ +armnnSerializer.QuantizedLstmLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QuantizedLstmLayer=} obj + * @returns {armnnSerializer.QuantizedLstmLayer} + */ +armnnSerializer.QuantizedLstmLayer.getRootAsQuantizedLstmLayer = function(bb, obj) { + return (obj || new armnnSerializer.QuantizedLstmLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.QuantizedLstmLayer=} obj + * @returns {armnnSerializer.QuantizedLstmLayer} + */ +armnnSerializer.QuantizedLstmLayer.getSizePrefixedRootAsQuantizedLstmLayer = function(bb, obj) { + bb.setPosition(bb.position() + 
flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.QuantizedLstmLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.QuantizedLstmLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.QuantizedLstmInputParams=} obj + * @returns {armnnSerializer.QuantizedLstmInputParams|null} + */ +armnnSerializer.QuantizedLstmLayer.prototype.inputParams = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.QuantizedLstmInputParams).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.QuantizedLstmLayer.startQuantizedLstmLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.QuantizedLstmLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputParamsOffset + */ +armnnSerializer.QuantizedLstmLayer.addInputParams = function(builder, inputParamsOffset) { + builder.addFieldOffset(1, inputParamsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QuantizedLstmLayer.endQuantizedLstmLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} inputParamsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.QuantizedLstmLayer.createQuantizedLstmLayer 
= function(builder, baseOffset, inputParamsOffset) { + armnnSerializer.QuantizedLstmLayer.startQuantizedLstmLayer(builder); + armnnSerializer.QuantizedLstmLayer.addBase(builder, baseOffset); + armnnSerializer.QuantizedLstmLayer.addInputParams(builder, inputParamsOffset); + return armnnSerializer.QuantizedLstmLayer.endQuantizedLstmLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.DequantizeLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.DequantizeLayer} + */ +armnnSerializer.DequantizeLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DequantizeLayer=} obj + * @returns {armnnSerializer.DequantizeLayer} + */ +armnnSerializer.DequantizeLayer.getRootAsDequantizeLayer = function(bb, obj) { + return (obj || new armnnSerializer.DequantizeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.DequantizeLayer=} obj + * @returns {armnnSerializer.DequantizeLayer} + */ +armnnSerializer.DequantizeLayer.getSizePrefixedRootAsDequantizeLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.DequantizeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.DequantizeLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.DequantizeLayer.startDequantizeLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.DequantizeLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DequantizeLayer.endDequantizeLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.DequantizeLayer.createDequantizeLayer = function(builder, baseOffset) { + armnnSerializer.DequantizeLayer.startDequantizeLayer(builder); + armnnSerializer.DequantizeLayer.addBase(builder, baseOffset); + return armnnSerializer.DequantizeLayer.endDequantizeLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.MergeLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.MergeLayer} + */ +armnnSerializer.MergeLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MergeLayer=} obj + * @returns {armnnSerializer.MergeLayer} + */ +armnnSerializer.MergeLayer.getRootAsMergeLayer = function(bb, obj) { + return (obj || new armnnSerializer.MergeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.MergeLayer=} obj + * @returns 
{armnnSerializer.MergeLayer} + */ +armnnSerializer.MergeLayer.getSizePrefixedRootAsMergeLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.MergeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.MergeLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.MergeLayer.startMergeLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.MergeLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MergeLayer.endMergeLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.MergeLayer.createMergeLayer = function(builder, baseOffset) { + armnnSerializer.MergeLayer.startMergeLayer(builder); + armnnSerializer.MergeLayer.addBase(builder, baseOffset); + return armnnSerializer.MergeLayer.endMergeLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.SwitchLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SwitchLayer} + */ +armnnSerializer.SwitchLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + 
return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SwitchLayer=} obj + * @returns {armnnSerializer.SwitchLayer} + */ +armnnSerializer.SwitchLayer.getRootAsSwitchLayer = function(bb, obj) { + return (obj || new armnnSerializer.SwitchLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SwitchLayer=} obj + * @returns {armnnSerializer.SwitchLayer} + */ +armnnSerializer.SwitchLayer.getSizePrefixedRootAsSwitchLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SwitchLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.SwitchLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SwitchLayer.startSwitchLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.SwitchLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SwitchLayer.endSwitchLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SwitchLayer.createSwitchLayer = function(builder, baseOffset) { + armnnSerializer.SwitchLayer.startSwitchLayer(builder); + armnnSerializer.SwitchLayer.addBase(builder, baseOffset); + return 
armnnSerializer.SwitchLayer.endSwitchLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.PreluLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.PreluLayer} + */ +armnnSerializer.PreluLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PreluLayer=} obj + * @returns {armnnSerializer.PreluLayer} + */ +armnnSerializer.PreluLayer.getRootAsPreluLayer = function(bb, obj) { + return (obj || new armnnSerializer.PreluLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.PreluLayer=} obj + * @returns {armnnSerializer.PreluLayer} + */ +armnnSerializer.PreluLayer.getSizePrefixedRootAsPreluLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.PreluLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.PreluLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.PreluLayer.startPreluLayer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.PreluLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PreluLayer.endPreluLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.PreluLayer.createPreluLayer = function(builder, baseOffset) { + armnnSerializer.PreluLayer.startPreluLayer(builder); + armnnSerializer.PreluLayer.addBase(builder, baseOffset); + return armnnSerializer.PreluLayer.endPreluLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.TransposeConvolution2dLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.TransposeConvolution2dLayer} + */ +armnnSerializer.TransposeConvolution2dLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TransposeConvolution2dLayer=} obj + * @returns {armnnSerializer.TransposeConvolution2dLayer} + */ +armnnSerializer.TransposeConvolution2dLayer.getRootAsTransposeConvolution2dLayer = function(bb, obj) { + return (obj || new armnnSerializer.TransposeConvolution2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * 
@param {armnnSerializer.TransposeConvolution2dLayer=} obj + * @returns {armnnSerializer.TransposeConvolution2dLayer} + */ +armnnSerializer.TransposeConvolution2dLayer.getSizePrefixedRootAsTransposeConvolution2dLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.TransposeConvolution2dLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.TransposeConvolution2dLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.TransposeConvolution2dDescriptor=} obj + * @returns {armnnSerializer.TransposeConvolution2dDescriptor|null} + */ +armnnSerializer.TransposeConvolution2dLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.TransposeConvolution2dDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.TransposeConvolution2dLayer.prototype.weights = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ConstTensor=} obj + * @returns {armnnSerializer.ConstTensor|null} + */ +armnnSerializer.TransposeConvolution2dLayer.prototype.biases = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new armnnSerializer.ConstTensor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.TransposeConvolution2dLayer.startTransposeConvolution2dLayer = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.TransposeConvolution2dLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.TransposeConvolution2dLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightsOffset + */ +armnnSerializer.TransposeConvolution2dLayer.addWeights = function(builder, weightsOffset) { + builder.addFieldOffset(2, weightsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasesOffset + */ +armnnSerializer.TransposeConvolution2dLayer.addBiases = function(builder, biasesOffset) { + builder.addFieldOffset(3, biasesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeConvolution2dLayer.endTransposeConvolution2dLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @param {flatbuffers.Offset} weightsOffset + * @param {flatbuffers.Offset} biasesOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeConvolution2dLayer.createTransposeConvolution2dLayer = function(builder, baseOffset, descriptorOffset, weightsOffset, biasesOffset) { + 
armnnSerializer.TransposeConvolution2dLayer.startTransposeConvolution2dLayer(builder); + armnnSerializer.TransposeConvolution2dLayer.addBase(builder, baseOffset); + armnnSerializer.TransposeConvolution2dLayer.addDescriptor(builder, descriptorOffset); + armnnSerializer.TransposeConvolution2dLayer.addWeights(builder, weightsOffset); + armnnSerializer.TransposeConvolution2dLayer.addBiases(builder, biasesOffset); + return armnnSerializer.TransposeConvolution2dLayer.endTransposeConvolution2dLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.TransposeConvolution2dDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.TransposeConvolution2dDescriptor} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TransposeConvolution2dDescriptor=} obj + * @returns {armnnSerializer.TransposeConvolution2dDescriptor} + */ +armnnSerializer.TransposeConvolution2dDescriptor.getRootAsTransposeConvolution2dDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.TransposeConvolution2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TransposeConvolution2dDescriptor=} obj + * @returns {armnnSerializer.TransposeConvolution2dDescriptor} + */ +armnnSerializer.TransposeConvolution2dDescriptor.getSizePrefixedRootAsTransposeConvolution2dDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.TransposeConvolution2dDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ 
+armnnSerializer.TransposeConvolution2dDescriptor.prototype.padLeft = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.padRight = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.padTop = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.padBottom = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.biasEnabled = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.TransposeConvolution2dDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NCHW; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.TransposeConvolution2dDescriptor.startTransposeConvolution2dDescriptor = function(builder) { + builder.startObject(8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padLeft + */ +armnnSerializer.TransposeConvolution2dDescriptor.addPadLeft = function(builder, padLeft) { + builder.addFieldInt32(0, padLeft, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padRight + */ +armnnSerializer.TransposeConvolution2dDescriptor.addPadRight = function(builder, padRight) { + builder.addFieldInt32(1, padRight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padTop + */ +armnnSerializer.TransposeConvolution2dDescriptor.addPadTop = function(builder, padTop) { + builder.addFieldInt32(2, padTop, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padBottom + */ +armnnSerializer.TransposeConvolution2dDescriptor.addPadBottom = function(builder, padBottom) { + builder.addFieldInt32(3, padBottom, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +armnnSerializer.TransposeConvolution2dDescriptor.addStrideX = function(builder, strideX) { + builder.addFieldInt32(4, strideX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +armnnSerializer.TransposeConvolution2dDescriptor.addStrideY = function(builder, strideY) { + builder.addFieldInt32(5, strideY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} biasEnabled + */ +armnnSerializer.TransposeConvolution2dDescriptor.addBiasEnabled = function(builder, biasEnabled) { + builder.addFieldInt8(6, +biasEnabled, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ 
+armnnSerializer.TransposeConvolution2dDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(7, dataLayout, armnnSerializer.DataLayout.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeConvolution2dDescriptor.endTransposeConvolution2dDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padLeft + * @param {number} padRight + * @param {number} padTop + * @param {number} padBottom + * @param {number} strideX + * @param {number} strideY + * @param {boolean} biasEnabled + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeConvolution2dDescriptor.createTransposeConvolution2dDescriptor = function(builder, padLeft, padRight, padTop, padBottom, strideX, strideY, biasEnabled, dataLayout) { + armnnSerializer.TransposeConvolution2dDescriptor.startTransposeConvolution2dDescriptor(builder); + armnnSerializer.TransposeConvolution2dDescriptor.addPadLeft(builder, padLeft); + armnnSerializer.TransposeConvolution2dDescriptor.addPadRight(builder, padRight); + armnnSerializer.TransposeConvolution2dDescriptor.addPadTop(builder, padTop); + armnnSerializer.TransposeConvolution2dDescriptor.addPadBottom(builder, padBottom); + armnnSerializer.TransposeConvolution2dDescriptor.addStrideX(builder, strideX); + armnnSerializer.TransposeConvolution2dDescriptor.addStrideY(builder, strideY); + armnnSerializer.TransposeConvolution2dDescriptor.addBiasEnabled(builder, biasEnabled); + armnnSerializer.TransposeConvolution2dDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.TransposeConvolution2dDescriptor.endTransposeConvolution2dDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.TransposeLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + 
*/ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.TransposeLayer} + */ +armnnSerializer.TransposeLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TransposeLayer=} obj + * @returns {armnnSerializer.TransposeLayer} + */ +armnnSerializer.TransposeLayer.getRootAsTransposeLayer = function(bb, obj) { + return (obj || new armnnSerializer.TransposeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TransposeLayer=} obj + * @returns {armnnSerializer.TransposeLayer} + */ +armnnSerializer.TransposeLayer.getSizePrefixedRootAsTransposeLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.TransposeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.TransposeLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.TransposeDescriptor=} obj + * @returns {armnnSerializer.TransposeDescriptor|null} + */ +armnnSerializer.TransposeLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.TransposeDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.TransposeLayer.startTransposeLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.TransposeLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.TransposeLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeLayer.endTransposeLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeLayer.createTransposeLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.TransposeLayer.startTransposeLayer(builder); + armnnSerializer.TransposeLayer.addBase(builder, baseOffset); + armnnSerializer.TransposeLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.TransposeLayer.endTransposeLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.TransposeDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.TransposeDescriptor} + */ +armnnSerializer.TransposeDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TransposeDescriptor=} obj + * @returns {armnnSerializer.TransposeDescriptor} + */ +armnnSerializer.TransposeDescriptor.getRootAsTransposeDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.TransposeDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.TransposeDescriptor=} obj + * @returns {armnnSerializer.TransposeDescriptor} + */ +armnnSerializer.TransposeDescriptor.getSizePrefixedRootAsTransposeDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.TransposeDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.TransposeDescriptor.prototype.dimMappings = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.TransposeDescriptor.prototype.dimMappingsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.TransposeDescriptor.prototype.dimMappingsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.TransposeDescriptor.startTransposeDescriptor = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimMappingsOffset + */ +armnnSerializer.TransposeDescriptor.addDimMappings = function(builder, dimMappingsOffset) { + builder.addFieldOffset(0, dimMappingsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeDescriptor.createDimMappingsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.TransposeDescriptor.startDimMappingsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeDescriptor.endTransposeDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimMappingsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.TransposeDescriptor.createTransposeDescriptor = function(builder, dimMappingsOffset) { + armnnSerializer.TransposeDescriptor.startTransposeDescriptor(builder); + armnnSerializer.TransposeDescriptor.addDimMappings(builder, dimMappingsOffset); + return armnnSerializer.TransposeDescriptor.endTransposeDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.ResizeLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + 
+ /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ResizeLayer} + */ +armnnSerializer.ResizeLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeLayer=} obj + * @returns {armnnSerializer.ResizeLayer} + */ +armnnSerializer.ResizeLayer.getRootAsResizeLayer = function(bb, obj) { + return (obj || new armnnSerializer.ResizeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeLayer=} obj + * @returns {armnnSerializer.ResizeLayer} + */ +armnnSerializer.ResizeLayer.getSizePrefixedRootAsResizeLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ResizeLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.ResizeLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.ResizeDescriptor=} obj + * @returns {armnnSerializer.ResizeDescriptor|null} + */ +armnnSerializer.ResizeLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.ResizeDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ResizeLayer.startResizeLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.ResizeLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.ResizeLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeLayer.endResizeLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeLayer.createResizeLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.ResizeLayer.startResizeLayer(builder); + armnnSerializer.ResizeLayer.addBase(builder, baseOffset); + armnnSerializer.ResizeLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.ResizeLayer.endResizeLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.ResizeDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.ResizeDescriptor} + */ +armnnSerializer.ResizeDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeDescriptor=} 
obj + * @returns {armnnSerializer.ResizeDescriptor} + */ +armnnSerializer.ResizeDescriptor.getRootAsResizeDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.ResizeDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.ResizeDescriptor=} obj + * @returns {armnnSerializer.ResizeDescriptor} + */ +armnnSerializer.ResizeDescriptor.getSizePrefixedRootAsResizeDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.ResizeDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.ResizeDescriptor.prototype.targetHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.ResizeDescriptor.prototype.targetWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {armnnSerializer.ResizeMethod} + */ +armnnSerializer.ResizeDescriptor.prototype.method = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {armnnSerializer.ResizeMethod} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.ResizeMethod.NearestNeighbor; +}; + +/** + * @returns {armnnSerializer.DataLayout} + */ +armnnSerializer.ResizeDescriptor.prototype.dataLayout = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
/** @type {armnnSerializer.DataLayout} */ (this.bb.readInt8(this.bb_pos + offset)) : armnnSerializer.DataLayout.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.ResizeDescriptor.startResizeDescriptor = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} targetHeight + */ +armnnSerializer.ResizeDescriptor.addTargetHeight = function(builder, targetHeight) { + builder.addFieldInt32(0, targetHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} targetWidth + */ +armnnSerializer.ResizeDescriptor.addTargetWidth = function(builder, targetWidth) { + builder.addFieldInt32(1, targetWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.ResizeMethod} method + */ +armnnSerializer.ResizeDescriptor.addMethod = function(builder, method) { + builder.addFieldInt8(2, method, armnnSerializer.ResizeMethod.NearestNeighbor); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.DataLayout} dataLayout + */ +armnnSerializer.ResizeDescriptor.addDataLayout = function(builder, dataLayout) { + builder.addFieldInt8(3, dataLayout, armnnSerializer.DataLayout.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeDescriptor.endResizeDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} targetHeight + * @param {number} targetWidth + * @param {armnnSerializer.ResizeMethod} method + * @param {armnnSerializer.DataLayout} dataLayout + * @returns {flatbuffers.Offset} + */ +armnnSerializer.ResizeDescriptor.createResizeDescriptor = function(builder, targetHeight, targetWidth, method, dataLayout) { + armnnSerializer.ResizeDescriptor.startResizeDescriptor(builder); + armnnSerializer.ResizeDescriptor.addTargetHeight(builder, targetHeight); + 
armnnSerializer.ResizeDescriptor.addTargetWidth(builder, targetWidth); + armnnSerializer.ResizeDescriptor.addMethod(builder, method); + armnnSerializer.ResizeDescriptor.addDataLayout(builder, dataLayout); + return armnnSerializer.ResizeDescriptor.endResizeDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.StackLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.StackLayer} + */ +armnnSerializer.StackLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StackLayer=} obj + * @returns {armnnSerializer.StackLayer} + */ +armnnSerializer.StackLayer.getRootAsStackLayer = function(bb, obj) { + return (obj || new armnnSerializer.StackLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StackLayer=} obj + * @returns {armnnSerializer.StackLayer} + */ +armnnSerializer.StackLayer.getSizePrefixedRootAsStackLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.StackLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.StackLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.StackDescriptor=} obj + * @returns {armnnSerializer.StackDescriptor|null} + */ +armnnSerializer.StackLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new armnnSerializer.StackDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.StackLayer.startStackLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.StackLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.StackLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StackLayer.endStackLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StackLayer.createStackLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.StackLayer.startStackLayer(builder); + armnnSerializer.StackLayer.addBase(builder, baseOffset); + armnnSerializer.StackLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.StackLayer.endStackLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.StackDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; 
+}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.StackDescriptor} + */ +armnnSerializer.StackDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StackDescriptor=} obj + * @returns {armnnSerializer.StackDescriptor} + */ +armnnSerializer.StackDescriptor.getRootAsStackDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.StackDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StackDescriptor=} obj + * @returns {armnnSerializer.StackDescriptor} + */ +armnnSerializer.StackDescriptor.getSizePrefixedRootAsStackDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.StackDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.StackDescriptor.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StackDescriptor.prototype.numInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.StackDescriptor.prototype.inputShape = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StackDescriptor.prototype.inputShapeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +armnnSerializer.StackDescriptor.prototype.inputShapeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.StackDescriptor.startStackDescriptor = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +armnnSerializer.StackDescriptor.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numInputs + */ +armnnSerializer.StackDescriptor.addNumInputs = function(builder, numInputs) { + builder.addFieldInt32(1, numInputs, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputShapeOffset + */ +armnnSerializer.StackDescriptor.addInputShape = function(builder, inputShapeOffset) { + builder.addFieldOffset(2, inputShapeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StackDescriptor.createInputShapeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.StackDescriptor.startInputShapeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StackDescriptor.endStackDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param 
{flatbuffers.Builder} builder + * @param {number} axis + * @param {number} numInputs + * @param {flatbuffers.Offset} inputShapeOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StackDescriptor.createStackDescriptor = function(builder, axis, numInputs, inputShapeOffset) { + armnnSerializer.StackDescriptor.startStackDescriptor(builder); + armnnSerializer.StackDescriptor.addAxis(builder, axis); + armnnSerializer.StackDescriptor.addNumInputs(builder, numInputs); + armnnSerializer.StackDescriptor.addInputShape(builder, inputShapeOffset); + return armnnSerializer.StackDescriptor.endStackDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.StandInDescriptor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.StandInDescriptor} + */ +armnnSerializer.StandInDescriptor.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StandInDescriptor=} obj + * @returns {armnnSerializer.StandInDescriptor} + */ +armnnSerializer.StandInDescriptor.getRootAsStandInDescriptor = function(bb, obj) { + return (obj || new armnnSerializer.StandInDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StandInDescriptor=} obj + * @returns {armnnSerializer.StandInDescriptor} + */ +armnnSerializer.StandInDescriptor.getSizePrefixedRootAsStandInDescriptor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.StandInDescriptor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.StandInDescriptor.prototype.numInputs = function() { + var offset = 
this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.StandInDescriptor.prototype.numOutputs = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.StandInDescriptor.startStandInDescriptor = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numInputs + */ +armnnSerializer.StandInDescriptor.addNumInputs = function(builder, numInputs) { + builder.addFieldInt32(0, numInputs, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numOutputs + */ +armnnSerializer.StandInDescriptor.addNumOutputs = function(builder, numOutputs) { + builder.addFieldInt32(1, numOutputs, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StandInDescriptor.endStandInDescriptor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numInputs + * @param {number} numOutputs + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StandInDescriptor.createStandInDescriptor = function(builder, numInputs, numOutputs) { + armnnSerializer.StandInDescriptor.startStandInDescriptor(builder); + armnnSerializer.StandInDescriptor.addNumInputs(builder, numInputs); + armnnSerializer.StandInDescriptor.addNumOutputs(builder, numOutputs); + return armnnSerializer.StandInDescriptor.endStandInDescriptor(builder); +} + +/** + * @constructor + */ +armnnSerializer.StandInLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.StandInLayer} + */ 
+armnnSerializer.StandInLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StandInLayer=} obj + * @returns {armnnSerializer.StandInLayer} + */ +armnnSerializer.StandInLayer.getRootAsStandInLayer = function(bb, obj) { + return (obj || new armnnSerializer.StandInLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.StandInLayer=} obj + * @returns {armnnSerializer.StandInLayer} + */ +armnnSerializer.StandInLayer.getSizePrefixedRootAsStandInLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.StandInLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {armnnSerializer.LayerBase=} obj + * @returns {armnnSerializer.LayerBase|null} + */ +armnnSerializer.StandInLayer.prototype.base = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new armnnSerializer.LayerBase).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {armnnSerializer.StandInDescriptor=} obj + * @returns {armnnSerializer.StandInDescriptor|null} + */ +armnnSerializer.StandInLayer.prototype.descriptor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new armnnSerializer.StandInDescriptor).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.StandInLayer.startStandInLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + */ +armnnSerializer.StandInLayer.addBase = function(builder, baseOffset) { + builder.addFieldOffset(0, baseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptorOffset + */ +armnnSerializer.StandInLayer.addDescriptor = function(builder, descriptorOffset) { + builder.addFieldOffset(1, descriptorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StandInLayer.endStandInLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} baseOffset + * @param {flatbuffers.Offset} descriptorOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.StandInLayer.createStandInLayer = function(builder, baseOffset, descriptorOffset) { + armnnSerializer.StandInLayer.startStandInLayer(builder); + armnnSerializer.StandInLayer.addBase(builder, baseOffset); + armnnSerializer.StandInLayer.addDescriptor(builder, descriptorOffset); + return armnnSerializer.StandInLayer.endStandInLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.AnyLayer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.AnyLayer} + */ +armnnSerializer.AnyLayer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.AnyLayer=} obj + * @returns 
{armnnSerializer.AnyLayer} + */ +armnnSerializer.AnyLayer.getRootAsAnyLayer = function(bb, obj) { + return (obj || new armnnSerializer.AnyLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.AnyLayer=} obj + * @returns {armnnSerializer.AnyLayer} + */ +armnnSerializer.AnyLayer.getSizePrefixedRootAsAnyLayer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.AnyLayer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {armnnSerializer.Layer} + */ +armnnSerializer.AnyLayer.prototype.layerType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {armnnSerializer.Layer} */ (this.bb.readUint8(this.bb_pos + offset)) : armnnSerializer.Layer.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +armnnSerializer.AnyLayer.prototype.layer = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.AnyLayer.startAnyLayer = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.Layer} layerType + */ +armnnSerializer.AnyLayer.addLayerType = function(builder, layerType) { + builder.addFieldInt8(0, layerType, armnnSerializer.Layer.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} layerOffset + */ +armnnSerializer.AnyLayer.addLayer = function(builder, layerOffset) { + builder.addFieldOffset(1, layerOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.AnyLayer.endAnyLayer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {armnnSerializer.Layer} layerType + * @param {flatbuffers.Offset} layerOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.AnyLayer.createAnyLayer = function(builder, layerType, layerOffset) { + armnnSerializer.AnyLayer.startAnyLayer(builder); + armnnSerializer.AnyLayer.addLayerType(builder, layerType); + armnnSerializer.AnyLayer.addLayer(builder, layerOffset); + return armnnSerializer.AnyLayer.endAnyLayer(builder); +} + +/** + * @constructor + */ +armnnSerializer.FeatureCompatibilityVersions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.FeatureCompatibilityVersions} + */ +armnnSerializer.FeatureCompatibilityVersions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FeatureCompatibilityVersions=} obj + * @returns 
{armnnSerializer.FeatureCompatibilityVersions} + */ +armnnSerializer.FeatureCompatibilityVersions.getRootAsFeatureCompatibilityVersions = function(bb, obj) { + return (obj || new armnnSerializer.FeatureCompatibilityVersions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.FeatureCompatibilityVersions=} obj + * @returns {armnnSerializer.FeatureCompatibilityVersions} + */ +armnnSerializer.FeatureCompatibilityVersions.getSizePrefixedRootAsFeatureCompatibilityVersions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.FeatureCompatibilityVersions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +armnnSerializer.FeatureCompatibilityVersions.prototype.bindingIdsScheme = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.FeatureCompatibilityVersions.startFeatureCompatibilityVersions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} bindingIdsScheme + */ +armnnSerializer.FeatureCompatibilityVersions.addBindingIdsScheme = function(builder, bindingIdsScheme) { + builder.addFieldInt32(0, bindingIdsScheme, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FeatureCompatibilityVersions.endFeatureCompatibilityVersions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} bindingIdsScheme + * @returns {flatbuffers.Offset} + */ +armnnSerializer.FeatureCompatibilityVersions.createFeatureCompatibilityVersions = function(builder, bindingIdsScheme) { + 
armnnSerializer.FeatureCompatibilityVersions.startFeatureCompatibilityVersions(builder); + armnnSerializer.FeatureCompatibilityVersions.addBindingIdsScheme(builder, bindingIdsScheme); + return armnnSerializer.FeatureCompatibilityVersions.endFeatureCompatibilityVersions(builder); +} + +/** + * @constructor + */ +armnnSerializer.SerializedGraph = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {armnnSerializer.SerializedGraph} + */ +armnnSerializer.SerializedGraph.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SerializedGraph=} obj + * @returns {armnnSerializer.SerializedGraph} + */ +armnnSerializer.SerializedGraph.getRootAsSerializedGraph = function(bb, obj) { + return (obj || new armnnSerializer.SerializedGraph).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {armnnSerializer.SerializedGraph=} obj + * @returns {armnnSerializer.SerializedGraph} + */ +armnnSerializer.SerializedGraph.getSizePrefixedRootAsSerializedGraph = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new armnnSerializer.SerializedGraph).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @returns {boolean} + */ +armnnSerializer.SerializedGraph.bufferHasIdentifier = function(bb) { + return bb.__has_identifier('ARMN'); +}; + +/** + * @param {number} index + * @param {armnnSerializer.AnyLayer=} obj + * @returns {armnnSerializer.AnyLayer} + */ +armnnSerializer.SerializedGraph.prototype.layers = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new armnnSerializer.AnyLayer).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +armnnSerializer.SerializedGraph.prototype.layersLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.SerializedGraph.prototype.inputIds = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.SerializedGraph.prototype.inputIdsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +armnnSerializer.SerializedGraph.prototype.inputIdsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +armnnSerializer.SerializedGraph.prototype.outputIds = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +armnnSerializer.SerializedGraph.prototype.outputIdsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +armnnSerializer.SerializedGraph.prototype.outputIdsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {armnnSerializer.FeatureCompatibilityVersions=} obj + * @returns {armnnSerializer.FeatureCompatibilityVersions|null} + */ +armnnSerializer.SerializedGraph.prototype.featureVersions = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new armnnSerializer.FeatureCompatibilityVersions).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +armnnSerializer.SerializedGraph.startSerializedGraph = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} layersOffset + */ +armnnSerializer.SerializedGraph.addLayers = function(builder, layersOffset) { + builder.addFieldOffset(0, layersOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SerializedGraph.createLayersVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.SerializedGraph.startLayersVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputIdsOffset + */ +armnnSerializer.SerializedGraph.addInputIds = function(builder, inputIdsOffset) { + builder.addFieldOffset(1, inputIdsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SerializedGraph.createInputIdsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for 
(var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.SerializedGraph.startInputIdsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputIdsOffset + */ +armnnSerializer.SerializedGraph.addOutputIds = function(builder, outputIdsOffset) { + builder.addFieldOffset(2, outputIdsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SerializedGraph.createOutputIdsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +armnnSerializer.SerializedGraph.startOutputIdsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} featureVersionsOffset + */ +armnnSerializer.SerializedGraph.addFeatureVersions = function(builder, featureVersionsOffset) { + builder.addFieldOffset(3, featureVersionsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SerializedGraph.endSerializedGraph = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ +armnnSerializer.SerializedGraph.finishSerializedGraphBuffer = function(builder, offset) { + builder.finish(offset, 'ARMN'); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ +armnnSerializer.SerializedGraph.finishSizePrefixedSerializedGraphBuffer = 
function(builder, offset) { + builder.finish(offset, 'ARMN', true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} layersOffset + * @param {flatbuffers.Offset} inputIdsOffset + * @param {flatbuffers.Offset} outputIdsOffset + * @param {flatbuffers.Offset} featureVersionsOffset + * @returns {flatbuffers.Offset} + */ +armnnSerializer.SerializedGraph.createSerializedGraph = function(builder, layersOffset, inputIdsOffset, outputIdsOffset, featureVersionsOffset) { + armnnSerializer.SerializedGraph.startSerializedGraph(builder); + armnnSerializer.SerializedGraph.addLayers(builder, layersOffset); + armnnSerializer.SerializedGraph.addInputIds(builder, inputIdsOffset); + armnnSerializer.SerializedGraph.addOutputIds(builder, outputIdsOffset); + armnnSerializer.SerializedGraph.addFeatureVersions(builder, featureVersionsOffset); + return armnnSerializer.SerializedGraph.endSerializedGraph(builder); +} + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports = { armnn_schema: armnnSerializer }; +} diff --git a/frontend/packages/core/public/netron/armnn.js b/frontend/packages/core/public/netron/armnn.js new file mode 100644 index 00000000..9d4ffd8b --- /dev/null +++ b/frontend/packages/core/public/netron/armnn.js @@ -0,0 +1,617 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var armnn = armnn || {}; +var base = base || require('./base'); +var flatbuffers = flatbuffers || require('flatbuffers').flatbuffers; +var long = long || { Long: require('long') }; + +armnn.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (extension == 'armnn') { + return true; + } + return false; + } + + open(context, host) { + return host.require('./armnn-schema').then((schema) => { + const identifier = context.identifier; + let model = null; + try { + const buffer = context.buffer; + const byteBuffer = new 
flatbuffers.ByteBuffer(buffer); + armnn.schema = schema.armnn_schema; + model = armnn.schema.SerializedGraph.getRootAsSerializedGraph(byteBuffer); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new armnn.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + + return armnn.Metadata.open(host).then((metadata) => { + try { + return new armnn.Model(model, metadata); + } + catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new new armnn.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } +}; + +armnn.Model = class { + + constructor(model, metadata) { + this._graphs = []; + this._graphs.push(new armnn.Graph(model, metadata)); + } + + get format() { + return 'Arm NN'; + } + + get description() { + return ''; + } + + get graphs() { + return this._graphs; + } +}; + +armnn.Graph = class { + + constructor(graph, metadata) { + this._name = ''; + this._nodes = []; + this._inputs = []; + this._outputs = []; + + // generate parameters + const args = {}; + for (let j = 0; j < graph.layersLength(); j++) { + let base = armnn.Node.getBase(graph.layers(j)); + for (let i = 0 ; i < base.outputSlotsLength() ; i++) { + const key = base.index().toString() + ':' + i.toString(); + args[key] = new armnn.Argument(key, base.outputSlots(i).tensorInfo(), null); + } + } + for (let j = 0; j < graph.layersLength(); j++) { + this._nodes.push(new armnn.Node(graph.layers(j), args, metadata)); + } + for (let k = 0; k < graph.inputIdsLength(); k++) { + // need to do something? + } + for (let l = 0; l < graph.outputIdsLength(); l++) { + // need to do something? 
+ } + } + + get name() { + return this._name; + } + + get groups() { + return false; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +armnn.Node = class { + + constructor(layer, args, metadata) { + this._metadata = metadata; + this._type = armnn.schema.LayerName[layer.layerType()]; + + this._name = ''; + this._outputs = []; + this._inputs = []; + this._attributes = []; + + const base = armnn.Node.getBase(layer); + if (base) { + this._name = base.layerName(); + + for (let i = 0; i < base.inputSlotsLength(); i++) { + const connection = base.inputSlots(i).connection(); + const sourceLayerIndex = connection.sourceLayerIndex(); + const sourceOutputIndex = connection.outputSlotIndex(); + const argument = args[sourceLayerIndex.toString() + ':' + sourceOutputIndex.toString()]; + this._inputs.push(new armnn.Parameter('input', [ argument ])); + } + + for (let j = 0; j < base.outputSlotsLength(); j++) { + const argument = args[base.index().toString() + ':' + j.toString()]; + this._outputs.push(new armnn.Parameter('output', [ argument ])); + } + } + + const schema = this._metadata.type(this._type); + if (schema) { + const _layer = armnn.Node.castLayer(layer); + + if (schema.bindings) { + for (let i = 0 ; i < schema.bindings.length ; i++) { + const binding = schema.bindings[i]; + const value = _layer.base()[binding.src](); + this._attributes.push(new armnn.Attribute(binding.name, binding.type, value)); + } + } + if (schema.attributes) { + for (const attribute of schema.attributes) { + const value = this.packAttr(_layer, attribute); + this._attributes.push(new armnn.Attribute(attribute.name, attribute.type, value)); + } + } + if (schema.inputs) { + for (let i = 0 ; i < schema.inputs.length ; i++) { + const input = schema.inputs[i]; + const initializer = _layer[input.src](); + if (initializer) { + const args = [ new armnn.Argument('', null, initializer) ]; + this._inputs.push(new 
armnn.Parameter(input.name, args)); + } + } + } + } + } + + get type() { + return this._type.replace(/Layer$/, ''); + } + + get name() { + return this._name; + } + + get domain() { + return null; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get group() { + return null; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + static castLayer(layer) { + let layerType = layer.layerType(); + for (const k of Object.keys(armnn.schema.Layer)) { + if (layerType == armnn.schema.Layer[k]) { + return layer.layer(new armnn.schema[k]); + } + } + return null; + } + + static getBase(layer) { + layer = armnn.Node.castLayer(layer); + return (layer.base().base)? layer.base().base() : layer.base(); + } + + getAttr(descriptor, key) { + if (typeof descriptor[key] == "undefined") + return "undefined"; + + const values = descriptor[key](); + if (Array.isArray(values)) { + return values.join(", "); + } + else { + return values; + } + } + + packAttr(layer, attr) { + const descriptor = layer === null ? 
null : layer.descriptor(); + const key = attr.src; + const type = attr.src_type; + + if (typeof type != "undefined") { + let value = this.getAttr(descriptor, key); + if (typeof armnn.schema[type + "Name"] != "undefined") { + return armnn.schema[type + "Name"][value]; + } + else { + return value; + } + } + else if (Array.isArray(key)) { + let values = []; + for (let i = 0 ; i < key.length ; i++) { + values.push(this.getAttr(descriptor, key[i])); + } + return values.join(", "); + } + else { + return this.getAttr(descriptor, key); + } + } + + static makeKey(layer_id, index) { + return layer_id.toString() + "_" + index.toString(); + } +}; + +armnn.Attribute = class { + + constructor(name, type, value) { + this._name = name; + this._value = value; + this._visible = true; + switch (type) { + case 'int': this._type = 'int32'; break; + case 'uint': this._type = 'uint32'; break; + case 'float': this._type = 'float32'; break; + case 'string': this._type = 'string'; break; + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +armnn.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +armnn.Argument = class { + + constructor(name, tensorInfo, initializer) { + if (typeof name !== 'string') { + throw new armnn.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + const info = initializer ? initializer.info() : tensorInfo; + this._name = name; + this._type = new armnn.TensorType(info); + this._initializer = initializer ? 
new armnn.Tensor(info, initializer) : null; + + if (this._type.dataType.startsWith('q') && info) { + this._scale = info.quantizationScale(); + this._zeroPoint = info.quantizationOffset(); + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get quantization() { + if (this._scale !== undefined && this._zeroPoint !== undefined) { + return this._scale.toString() + ' * ' + (this._zeroPoint == 0 ? 'q' : ('(q - ' + this._zeroPoint.toString() + ')')); + } + return undefined; + } + + get initializer() { + return this._initializer; + } +}; + +armnn.Tensor = class { + + constructor(tensorInfo, tensor) { + this._name = ''; + this._type = new armnn.TensorType(tensorInfo); + this._kind = 'Initializer'; + + let data = null; + if (tensor.dataType() == armnn.schema.ConstTensorData.ByteData) + data = tensor.data(new armnn.schema.ByteData); + else if (tensor.dataType() == armnn.schema.ConstTensorData.ShortData) + data = tensor.data(new armnn.schema.ShortData); + else if (tensor.dataType() == armnn.schema.ConstTensorData.IntData) + data = tensor.data(new armnn.schema.IntData); + else if (tensor.dataType() == armnn.schema.ConstTensorData.LongData) + data = tensor.data(new armnn.schema.LongData); + + this._data = data.dataLength() > 0 ? 
data.dataArray() : null; + } + + get name() { + return this._name; + } + + get kind() { + return this._kind; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + let context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + let context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + let value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + let context = {}; + context.state = null; + context.index = 0; + context.count = 0; + + if (this._data == null) { + context.state = 'Tensor data is empty.'; + return context; + } + + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + + return context; + } + + _decode(context, dimension) { + let shape = context.shape; + if (shape.length == 0) { + shape = [ 1 ]; + } + let size = shape[dimension]; + let results = []; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (context.dataType) { + case 'float16': + results.push(context.data.getFloat16(context.index, true)); + context.index += 2; + context.count++; + break; + case 'float32': + results.push(context.data.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'quint8': + results.push(context.data.getUint8(context.index)); + context.index += 1; + context.count++; + break; + case 'qint16': + results.push(context.data.getInt16(context.index, true)); + context.index += 2; + context.count++; + break; + case 'int32': + results.push(context.data.getInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'boolean': + 
results.push(context.data.getInt8(context.index)); + context.index += 1; + context.count++; + break; + default: + break; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } +}; + +armnn.TensorType = class { + + constructor(tensorInfo) { + + const dataType = tensorInfo.dataType(); + switch (dataType) { + case 0: this._dataType = 'float16'; break; + case 1: this._dataType = 'float32'; break; + case 2: this._dataType = 'quint8'; break; // QuantisedAsymm8 + case 3: this._dataType = 'int32'; break; + case 4: this._dataType = 'boolean'; break; + case 5: this._dataType = 'qint16'; break; // QuantisedSymm16 + case 6: this._dataType = 'quint8'; break; // QAsymmU8 + case 7: this._dataType = 'qint16'; break; // QSymmS16 + default: throw new armnn.Error("Unknown data type '" + dataType + "'."); + } + + let dimensions = []; + let dimensionsLength = tensorInfo.dimensionsLength(); + if (dimensionsLength > 0) { + for (let i = 0; i < dimensionsLength; i++) { + dimensions.push(tensorInfo.dimensions(i)); + } + } + this._shape = new armnn.TensorShape(dimensions); + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +armnn.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']'; + } +}; + +armnn.Metadata = class { + + static open(host) { + if (armnn.Metadata._metadata) { + return Promise.resolve(armnn.Metadata._metadata); + } + return host.request(null, 'armnn-metadata.json', 
'utf-8').then((data) => { + armnn.Metadata._metadata = new armnn.Metadata(data); + return armnn.Metadata._metadata; + }).catch(() => { + armnn.Metadata._metadata = new armnn.Metadata(null); + return armnn.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + if (data) { + let items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name]; + } + + attribute(type, name) { + const schema = this.type(type); + if (schema) { + let attributeMap = schema.attributeMap; + if (!attributeMap) { + attributeMap = {}; + if (schema.attributes) { + for (const attribute of schema.attributes) { + attributeMap[attribute.name] = attribute; + } + } + schema.attributeMap = attributeMap; + } + let attributeSchema = attributeMap[name]; + if (attributeSchema) { + return attributeSchema; + } + } + return null; + } +}; + +armnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Arm NN model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = armnn.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/barracuda.js b/frontend/packages/core/public/netron/barracuda.js new file mode 100755 index 00000000..a9b7f731 --- /dev/null +++ b/frontend/packages/core/public/netron/barracuda.js @@ -0,0 +1,650 @@ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var barracuda = barracuda || {}; +var base = base || require('./base'); +var long = long || { Long: require('long') }; + +barracuda.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'nn') { + const buffer = context.buffer; + if (buffer.length > 12 && buffer[0] <= 0x10 && 
buffer.subarray(1, 8).every((v) => v == 0x00)) { + return true; + } + } + return false; + } + + open(context /*, host */) { + return barracuda.Metadata.open().then((metadata) => { + try { + const nn = new barracuda.NNModel(context.buffer); + return new barracuda.Model(metadata, nn); + } + catch (error) { + const identifier = context.identifier.toLowerCase(); + const message = error && error.message ? error.message : error.toString(); + throw new barracuda.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } +}; + +barracuda.Model = class { + + constructor(metadata, nn) { + this._version = nn.version.toString(); + this._graphs = [ new barracuda.Graph(metadata, nn) ]; + } + + get format() { + return "Barracuda v" + this._version; + } + + get graphs() { + return this._graphs; + } +}; + +barracuda.Graph = class { + + constructor(metadata, nn) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + for (const input of nn.inputs) { + this._inputs.push(new barracuda.Parameter(input.name, [ + new barracuda.Argument(input.name, new barracuda.TensorType(4, new barracuda.TensorShape(input.shape))) + ])); + } + for (const output of nn.outputs) { + this._outputs.push(new barracuda.Parameter(output, [ + new barracuda.Argument(output) + ])); + } + const layers = []; + const initializers = new Map(); + for (const layer of nn.layers) { + if (layer.type !== 255 || layer.inputs.length > 0) { + layers.push(layer); + } + else { + for (const tensor of layer.tensors) { + initializers.set(tensor.name, new barracuda.Tensor(tensor)); + } + } + } + + for (const layer of layers) { + this._nodes.push(new barracuda.Node(metadata, layer, initializers)); + } + } + + get name() { + return ''; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +barracuda.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get 
name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +barracuda.Argument = class { + + constructor(name, type, initializer) { + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + + +barracuda.Node = class { + + constructor(metadata, layer, initializers) { + + this._name = layer.name; + this._metadata = metadata.type(layer.type) || { name: layer.type.toString() }; + this._type = this._metadata.name; + + this._inputs = []; + this._outputs = []; + this._attributes = []; + const inputs = Array.prototype.slice.call(this._metadata.inputs || [ 'input' ]); + if (this._metadata.inputs && this._metadata.inputs.length === 1 && this._metadata.inputs[0] === 'inputs') { + this._inputs.push(new barracuda.Parameter('inputs', layer.inputs.map((input) => { + const initializer = initializers.has(input) ? initializers.get(input) : null; + return new barracuda.Argument(input, initializer ? initializer.type : null, initializer); + }))); + } + else { + for (let i = 0; i < layer.inputs.length; i++) { + const input = layer.inputs[i]; + const initializer = initializers.has(input) ? initializers.get(input) : null; + this._inputs.push(new barracuda.Parameter(inputs.length > 0 ? inputs.shift() : i.toString(), [ + new barracuda.Argument(input, initializer ? initializer.type : null, initializer) + ])); + } + } + for (let i = 0; i < layer.tensors.length; i++) { + const tensor = layer.tensors[i]; + const initializer = new barracuda.Tensor(tensor); + this._inputs.push(new barracuda.Parameter(inputs.length > 0 ? 
inputs.shift() : i.toString(), [ + new barracuda.Argument(tensor.name, initializer.type, initializer) + ])); + } + this._outputs.push(new barracuda.Parameter('output', [ + new barracuda.Argument(this._name) + ])); + if (this._type === 'Activation') { + if (!barracuda.Activation[layer.activation]) { + throw new barracuda.Error("Unknown activation '" + layer.activation + "'."); + } + this._type = barracuda.Activation[layer.activation]; + } + else if (layer.activation !== 0) { + throw new barracuda.Error("Unsupported activation '" + layer.activation + "' for type '" + this._type + "'."); + } + const attribute = (name, type, value, defaultValue) => { + if (Array.isArray(defaultValue) && Array.isArray(value) && value.length == defaultValue.length && value.every((v, i) => v === defaultValue[i])) { + return; + } + if (typeof defaultValue == 'function' && defaultValue(value)) { + return; + } + if (defaultValue === value) { + return; + } + this._attributes.push(new barracuda.Attribute(name, type, value)); + }; + attribute('strides', 'int32[]', layer.strides, []); + attribute('pads', 'int32[]', layer.pads, (value) => Array.isArray(value) && (value.every((v) => v === 0) || value.every((v) => v === -1))); + attribute('size', 'int32[]', layer.pool_size, []); + attribute('alpha', 'float32', layer.alpha, 1); + attribute('beta', 'float32', layer.beta, 0); + attribute('axis', 'int32', layer.axis, -1); + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get metadata() { + return this._metadata; + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } +}; + +barracuda.Attribute = class { + + constructor(name, type, value) { + this._name = name; + this._type = type; + this._value = value; + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return true; 
+ } +}; + +barracuda.Tensor = class { + + constructor(tensor) { + this._type = new barracuda.TensorType(tensor.itemsize, new barracuda.TensorShape(tensor.shape)); + this._data = tensor.data; + } + + get kind() { + return ''; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state || null; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + + if (this._type.dataType == '?') { + context.state = 'Tensor has unknown data type.'; + return context; + } + if (!this._type.shape || (this._type.shape.dimensions && this._type.shape.dimensions.length == 0)) { + context.state = 'Tensor has no dimensions.'; + return context; + } + + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + + switch (this._type.dataType) { + case 'float32': + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + break; + default: + context.state = 'Tensor data type is not implemented.'; + break; + } + + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + return context; + } + + _decode(context, dimension) { + const shape = context.shape.length == 0 ? 
[ 1 ] : context.shape; + const results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (this._type.dataType) { + case 'float32': + results.push(context.data.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } +}; + +barracuda.TensorType = class { + + constructor(itemsize, shape) { + switch (itemsize) { + case 4: this._dataType = 'float32'; break; + default: throw new barracuda.Error("Unsupported data type size '" + itemsize.toString() + "'."); + } + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +barracuda.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? ('[' + this._dimensions.map((dimension) => dimension ? 
dimension.toString() : '?').join(',') + ']') : ''; + } +}; + +barracuda.NNModel = class { + + constructor(buffer) { + + // https://github.com/Unity-Technologies/ml-agents/blob/master/ml-agents/mlagents/trainers/barracuda.py + // https://github.com/Unity-Technologies/ml-agents/blob/master/ml-agents/mlagents/trainers/tensorflow_to_barracuda.py + + const reader = new barracuda.BinaryReader(buffer); + this._version = reader.int32(); + reader.int32(); + + this._inputs = []; + const modelInputsLength = reader.int32(); + for (let i = 0; i < modelInputsLength; i++) { + this._inputs.push({ + name: reader.string(), + shape: reader.shape() + }); + } + this._outputs = reader.strings(); + + this._memories = []; + const memoriesLength = reader.int32(); + for (let i = 0; i < memoriesLength; i++) { + // debugger; + this._memories.push({ + shape: reader.shape(), + in: reader.string(), + out: reader.string() + }); + } + + this._layers = []; + const layersLength = reader.int32(); + for (let i = 0; i < layersLength; i++) { + const layer = {}; + layer.name = reader.string(); + layer.type = reader.int32(); + layer.activation = reader.int32(); + reader.int32(); + reader.int32(); + layer.pads = reader.int32s(); + layer.strides = reader.int32s(); + layer.pool_size = reader.int32s(); + layer.axis = reader.int32(); + layer.alpha = reader.float32(); + layer.beta = reader.float32(); + reader.int32(); + layer.inputs = reader.strings(); + layer.tensors = []; + const tensorsLength = reader.int32(); + for (let j = 0; j < tensorsLength; j++) { + layer.tensors.push({ + name: reader.string(), + shape: reader.shape(), + offset: reader.int64(), + itemsize: reader.int32(), + length: reader.int32() + }); + } + this._layers.push(layer); + } + for (const layer of this._layers) { + for (const tensor of layer.tensors) { + tensor.data = reader.bytes(tensor.offset * tensor.itemsize, tensor.length * tensor.itemsize); + } + } + } + + get version() { + return this._version; + } + + get inputs() { + return 
this._inputs; + } + + get outputs() { + return this._outputs; + } + + get memories() { + return this._memories; + } + + get layers() { + return this._layers; + } +}; + +barracuda.Activation = { + 0: "Linear", 1: "Relu", 2: "Softmax", 3: "Tanh", 4: "Sigmoid", 5: "Elu", 6: "Relu6", 7: "LeakyRelu", + 8: "Selu", 9: "Swish", 10: "LogSoftmax", 11: "Softplus", 12: "Softsign", + 100: "Abs", 101: "Neg", 102: "Ceil", 104: "Floor", 111: "Sqrt", 113: "Exp", 114: "Log", + 200: "Acos", 201: "Acosh", 202: "Asin", 203: "Asinh", 204: "Atan", 205: "Atanh", 206: "Cos", 207: "Cosh", 208: "Sin", 209: "Sinh", 210: "Tan" +}; + +barracuda.BinaryReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new barracuda.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.'); + } + } + + bytes(offset, length) { + const start = this._position + offset; + const end = start + length; + if (end > this._buffer.length) { + throw new barracuda.Error('Expected ' + (end - this._buffer.length) + ' more bytes. The file might be corrupted. 
Unexpected end of file.'); + } + return this._buffer.slice(start, end); + } + + int32() { + const position = this._position; + this.skip(4); + return this._dataView.getInt32(position, true); + } + + int32s() { + const values = []; + const count = this.int32(); + for (let i = 0; i < count; i++) { + values.push(this.int32()); + } + return values; + } + + int64() { + const value = this.int32(); + if (this.int32() !== 0) { + throw new barracuda.Error('Invalid int64 value.'); + } + return value; + } + + float32() { + const position = this._position; + this.skip(4); + return this._dataView.getFloat32(position, true); + } + + string() { + let text = ''; + const size = this.int32(); + let position = this._position; + this.skip(size); + for (let i = 0; i < size; i++) { + text += String.fromCharCode(this._buffer[position++]); + } + return text; + } + + strings() { + const values = []; + const length = this.int32(); + for (let i = 0; i < length; i++) { + values.push(this.string()); + } + return values; + } + + shape() { + return this.int32s(); + } +}; + +barracuda.Metadata = class { + + static open() { + barracuda.Metadata._metadata = barracuda.Metadata._metadata || new barracuda.Metadata(); + return Promise.resolve(barracuda.Metadata._metadata); + } + + constructor() { + this._map = new Map(); + this._register(0, 'Nop', ''); + this._register(1, 'Dense', 'Layer', [ 'input', 'kernel', 'bias' ]); + this._register(2, 'MatMul', '', [ 'input', 'kernel', 'bias' ]); + this._register(20, 'Conv2D', 'Layer', [ 'input', 'kernel', 'bias' ]); + this._register(21, 'DepthwiseConv2dNative', 'Layer', [ 'input', 'kernel', 'bias' ]); + this._register(22, 'Conv2DBackpropInput', ''); + this._register(23, 'Upsample2D', ''); + this._register(25, 'MaxPool', 'Pool'); + this._register(26, 'AvgPool', 'Pool'); + this._register(28, 'GlobalAvgPool', 'Pool'); + this._register(29, 'Pad', ''); + this._register(50, 'Activation', 'Activation'); + this._register(51, 'ScaleBias', 'Normalization', [ 'input', 
'scale', 'bias' ]); + this._register(52, 'InstanceNormalization', 'Normalization'); + this._register(53, 'LRN', 'Normalization'); + this._register(64, 'RandomStandardNormal', ''); + this._register(65, 'RandomUniform', ''); + this._register(67, 'OneHot', ''); + this._register(100, 'Add', '', [ 'inputs' ]); + this._register(101, 'Sub', '', [ 'inputs' ]); + this._register(102, 'Mul', '', [ 'inputs' ]); + this._register(103, 'RealDiv', '', [ 'inputs' ]); + this._register(104, 'Pow', '', [ 'inputs' ]); + this._register(110, 'Minimum', '', [ 'inputs' ]); + this._register(111, 'Maximum', '', [ 'inputs' ]); + this._register(124, 'Max', '', [ 'inputs' ]); + this._register(125, 'Mean', '', [ 'inputs' ]); + this._register(126, 'Min', '', [ 'inputs' ]); + this._register(127, 'Prod', '', [ 'inputs' ]); + this._register(128, 'Sum', '', [ 'inputs' ]); + this._register(200, 'Flatten', 'Shape'); + this._register(201, 'Reshape', 'Shape'); + this._register(210, 'Concat', 'Tensor', [ 'inputs' ]); + this._register(211, 'StridedSlice', 'Shape'); + } + + _register(id, name, category, inputs) { + this._map.set(id, { name: name, category: category, inputs: inputs }); + } + + type(name) { + if (this._map.has(name)) { + return this._map.get(name); + } + return null; + } +}; + +barracuda.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Barracuda model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = barracuda.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/base.js b/frontend/packages/core/public/netron/base.js new file mode 100644 index 00000000..fa39c6f5 --- /dev/null +++ b/frontend/packages/core/public/netron/base.js @@ -0,0 +1,92 @@ +/* jshint esversion: 6 */ + +var base = base || {}; + +if (typeof window !== 'undefined' && typeof window.Long != 'undefined') { + window.long = { Long: window.Long }; +} + +if (!DataView.prototype.getFloat16) { + 
DataView.prototype.getFloat16 = function(byteOffset, littleEndian) { + const value = this.getUint16(byteOffset, littleEndian); + const e = (value & 0x7C00) >> 10; + let f = value & 0x03FF; + if (e == 0) { + f = 0.00006103515625 * (f / 1024); + } + else if (e == 0x1F) { + f = f ? NaN : Infinity; + } + else { + f = DataView.__float16_pow[e] * (1 + (f / 1024)); + } + return value & 0x8000 ? -f : f; + }; + DataView.__float16_pow = { + 1: 1/16384, 2: 1/8192, 3: 1/4096, 4: 1/2048, 5: 1/1024, 6: 1/512, 7: 1/256, 8: 1/128, + 9: 1/64, 10: 1/32, 11: 1/16, 12: 1/8, 13: 1/4, 14: 1/2, 15: 1, 16: 2, + 17: 4, 18: 8, 19: 16, 20: 32, 21: 64, 22: 128, 23: 256, 24: 512, + 25: 1024, 26: 2048, 27: 4096, 28: 8192, 29: 16384, 30: 32768, 31: 65536 + }; +} + +if (!DataView.prototype.setFloat16) { + DataView.prototype.setFloat16 = function(byteOffset, value, littleEndian) { + DataView.__float16_float[0] = value; + value = DataView.__float16_int[0]; + const s = (value >>> 16) & 0x8000; + const e = (value >>> 23) & 0xff; + const f = value & 0x7fffff; + const v = s | DataView.__float16_base[e] | (f >> DataView.__float16_shift[e]); + this.setUint16(byteOffset, v, littleEndian); + }; + DataView.__float16_float = new Float32Array(1); + DataView.__float16_int = new Uint32Array(DataView.__float16_float.buffer, 0, DataView.__float16_float.length); + DataView.__float16_base = new Uint32Array(256); + DataView.__float16_shift = new Uint32Array(256); + for (let i = 0; i < 256; ++i) { + let e = i - 127; + if (e < -27) { + DataView.__float16_base[i] = 0x0000; + DataView.__float16_shift[i] = 24; + } + else if (e < -14) { + DataView.__float16_base[i] = 0x0400 >> -e - 14; + DataView.__float16_shift[i] = -e - 1; + } + else if (e <= 15) { + DataView.__float16_base[i] = e + 15 << 10; + DataView.__float16_shift[i] = 13; + } + else if (e < 128) { + DataView.__float16_base[i] = 0x7c00; + DataView.__float16_shift[i] = 24; + } + else { + DataView.__float16_base[i] = 0x7c00; + DataView.__float16_shift[i] = 13; + } + 
// Read the `position`-th packed big-endian bit field of width `width` bits.
// Fields are laid out back-to-back from bit 0 of byte 0; the result is
// assembled most-significant-bits first, one byte fragment per iteration.
// (Safe for widths small enough that the running value fits in 32 bits,
// since assembly uses 32-bit shifts.)
if (!DataView.prototype.getBits) {
    DataView.prototype.getBits = function(position, width /*, signed */) {
        let bitOffset = position * width;
        const remaining = (this.byteLength << 3) - bitOffset;
        if (width > remaining) {
            throw new RangeError();
        }
        let result = 0;
        for (let consumed = 0; consumed < width;) {
            const shift = bitOffset & 7;                 // position inside the current byte
            const take = Math.min(width - consumed, 8 - shift);
            const fragment = (this.getUint8(bitOffset >> 3) >> (8 - take - shift)) & ~(0xff << take);
            result = (result << take) | fragment;
            bitOffset += take;
            consumed += take;
        }
        return result;
    };
}
"weight" }, + { "name": "bias" } + ] + } + }, + { + "name": "SpatialAveragePooling", + "schema": { + "category": "Pool" + } + }, + { + "name": "SpatialMaxPooling", + "schema": { + "category": "Pool" + } + }, + { + "name": "Transpose", + "schema": { + "category": "Shape" + } + }, + { + "name": "InferReshape", + "schema": { + "category": "Shape" + } + } +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/bigdl-proto.js b/frontend/packages/core/public/netron/bigdl-proto.js new file mode 100644 index 00000000..5abe7e38 --- /dev/null +++ b/frontend/packages/core/public/netron/bigdl-proto.js @@ -0,0 +1,986 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.bigdl || ($protobuf.roots.bigdl = {}); + + $root.com = (function() { + + var com = {}; + + com.intel = (function() { + + var intel = {}; + + intel.analytics = (function() { + + var analytics = {}; + + analytics.bigdl = (function() { + + var bigdl = {}; + + bigdl.serialization = (function() { + + var serialization = {}; + + serialization.BigDLModule = (function() { + + function BigDLModule(properties) { + this.subModules = []; + this.preModules = []; + this.nextModules = []; + this.attr = {}; + this.parameters = []; + this.inputScales = []; + this.outputScales = []; + this.weightScales = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BigDLModule.prototype.name = ""; + BigDLModule.prototype.subModules = $util.emptyArray; + BigDLModule.prototype.weight = null; + BigDLModule.prototype.bias = null; + BigDLModule.prototype.preModules = $util.emptyArray; + BigDLModule.prototype.nextModules = $util.emptyArray; + 
BigDLModule.prototype.moduleType = ""; + BigDLModule.prototype.attr = $util.emptyObject; + BigDLModule.prototype.version = ""; + BigDLModule.prototype.train = false; + BigDLModule.prototype.namePostfix = ""; + BigDLModule.prototype.id = 0; + BigDLModule.prototype.inputShape = null; + BigDLModule.prototype.outputShape = null; + BigDLModule.prototype.hasParameters = false; + BigDLModule.prototype.parameters = $util.emptyArray; + BigDLModule.prototype.isMklInt8Enabled = false; + BigDLModule.prototype.inputDimMasks = 0; + BigDLModule.prototype.inputScales = $util.emptyArray; + BigDLModule.prototype.outputDimMasks = 0; + BigDLModule.prototype.outputScales = $util.emptyArray; + BigDLModule.prototype.weightDimMasks = 0; + BigDLModule.prototype.weightScales = $util.emptyArray; + + BigDLModule.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.BigDLModule(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.subModules && message.subModules.length)) + message.subModules = []; + message.subModules.push($root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32())); + break; + case 3: + message.weight = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()); + break; + case 4: + message.bias = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()); + break; + case 5: + if (!(message.preModules && message.preModules.length)) + message.preModules = []; + message.preModules.push(reader.string()); + break; + case 6: + if (!(message.nextModules && message.nextModules.length)) + message.nextModules = []; + message.nextModules.push(reader.string()); + break; + case 7: + 
message.moduleType = reader.string(); + break; + case 8: + reader.skip().pos++; + if (message.attr === $util.emptyObject) + message.attr = {}; + key = reader.string(); + reader.pos++; + message.attr[key] = $root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32()); + break; + case 9: + message.version = reader.string(); + break; + case 10: + message.train = reader.bool(); + break; + case 11: + message.namePostfix = reader.string(); + break; + case 12: + message.id = reader.int32(); + break; + case 13: + message.inputShape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()); + break; + case 14: + message.outputShape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()); + break; + case 15: + message.hasParameters = reader.bool(); + break; + case 16: + if (!(message.parameters && message.parameters.length)) + message.parameters = []; + message.parameters.push($root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32())); + break; + case 17: + message.isMklInt8Enabled = reader.bool(); + break; + case 18: + message.inputDimMasks = reader.int32(); + break; + case 19: + if (!(message.inputScales && message.inputScales.length)) + message.inputScales = []; + message.inputScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + break; + case 20: + message.outputDimMasks = reader.int32(); + break; + case 21: + if (!(message.outputScales && message.outputScales.length)) + message.outputScales = []; + message.outputScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + break; + case 22: + message.weightDimMasks = reader.int32(); + break; + case 23: + if (!(message.weightScales && message.weightScales.length)) + message.weightScales = []; + message.weightScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32())); + 
break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BigDLModule; + })(); + + serialization.VarFormat = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "EMPTY_FORMAT"] = 0; + values[valuesById[1] = "DEFAULT"] = 1; + values[valuesById[2] = "ONE_D"] = 2; + values[valuesById[3] = "IN_OUT"] = 3; + values[valuesById[4] = "OUT_IN"] = 4; + values[valuesById[5] = "IN_OUT_KW_KH"] = 5; + values[valuesById[6] = "OUT_IN_KW_KH"] = 6; + values[valuesById[7] = "GP_OUT_IN_KW_KH"] = 7; + values[valuesById[8] = "GP_IN_OUT_KW_KH"] = 8; + values[valuesById[9] = "OUT_IN_KT_KH_KW"] = 9; + return values; + })(); + + serialization.InitMethodType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "EMPTY_INITIALIZATION"] = 0; + values[valuesById[1] = "RANDOM_UNIFORM"] = 1; + values[valuesById[2] = "RANDOM_UNIFORM_PARAM"] = 2; + values[valuesById[3] = "RANDOM_NORMAL"] = 3; + values[valuesById[4] = "ZEROS"] = 4; + values[valuesById[5] = "ONES"] = 5; + values[valuesById[6] = "CONST"] = 6; + values[valuesById[7] = "XAVIER"] = 7; + values[valuesById[8] = "BILINEARFILLER"] = 8; + return values; + })(); + + serialization.RegularizerType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "L1L2Regularizer"] = 0; + values[valuesById[1] = "L1Regularizer"] = 1; + values[valuesById[2] = "L2Regularizer"] = 2; + return values; + })(); + + serialization.InputDataFormat = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NCHW"] = 0; + values[valuesById[1] = "NHWC"] = 1; + return values; + })(); + + serialization.TensorType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DENSE"] = 0; + values[valuesById[1] = "QUANT"] = 1; + return values; + })(); + + serialization.InitMethod = (function() { + + function 
InitMethod(properties) { + this.data = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + InitMethod.prototype.methodType = 0; + InitMethod.prototype.data = $util.emptyArray; + + InitMethod.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.InitMethod(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.methodType = reader.int32(); + break; + case 2: + if (!(message.data && message.data.length)) + message.data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.data.push(reader.double()); + } else + message.data.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return InitMethod; + })(); + + serialization.BigDLTensor = (function() { + + function BigDLTensor(properties) { + this.size = []; + this.stride = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BigDLTensor.prototype.datatype = 0; + BigDLTensor.prototype.size = $util.emptyArray; + BigDLTensor.prototype.stride = $util.emptyArray; + BigDLTensor.prototype.offset = 0; + BigDLTensor.prototype.dimension = 0; + BigDLTensor.prototype.nElements = 0; + BigDLTensor.prototype.isScalar = false; + BigDLTensor.prototype.storage = null; + BigDLTensor.prototype.id = 0; + BigDLTensor.prototype.tensorType = 0; + + BigDLTensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.BigDLTensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.datatype = reader.int32(); + break; + case 2: + if (!(message.size && message.size.length)) + message.size = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.size.push(reader.int32()); + } else + message.size.push(reader.int32()); + break; + case 3: + if (!(message.stride && message.stride.length)) + message.stride = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.stride.push(reader.int32()); + } else + message.stride.push(reader.int32()); + break; + case 4: + message.offset = reader.int32(); + break; + case 5: + message.dimension = reader.int32(); + break; + case 6: + message.nElements = reader.int32(); + break; + case 7: + message.isScalar = reader.bool(); + break; + case 8: + message.storage = $root.com.intel.analytics.bigdl.serialization.TensorStorage.decode(reader, reader.uint32()); + break; + case 9: + message.id = reader.int32(); + break; + case 10: + message.tensorType = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BigDLTensor; + })(); + + serialization.TensorStorage = (function() { + + function TensorStorage(properties) { + this.float_data = []; + this.double_data = []; + this.bool_data = []; + this.string_data = []; + this.int_data = []; + this.long_data = []; + this.bytes_data = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorStorage.prototype.datatype = 0; + TensorStorage.prototype.float_data = $util.emptyArray; + TensorStorage.prototype.double_data = $util.emptyArray; + TensorStorage.prototype.bool_data = $util.emptyArray; + 
TensorStorage.prototype.string_data = $util.emptyArray; + TensorStorage.prototype.int_data = $util.emptyArray; + TensorStorage.prototype.long_data = $util.emptyArray; + TensorStorage.prototype.bytes_data = $util.emptyArray; + TensorStorage.prototype.id = 0; + + TensorStorage.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.TensorStorage(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.datatype = reader.int32(); + break; + case 2: + if (!(message.float_data && message.float_data.length)) + message.float_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.float_data.push(reader.float()); + } else + message.float_data.push(reader.float()); + break; + case 3: + if (!(message.double_data && message.double_data.length)) + message.double_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.double_data.push(reader.double()); + } else + message.double_data.push(reader.double()); + break; + case 4: + if (!(message.bool_data && message.bool_data.length)) + message.bool_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.bool_data.push(reader.bool()); + } else + message.bool_data.push(reader.bool()); + break; + case 5: + if (!(message.string_data && message.string_data.length)) + message.string_data = []; + message.string_data.push(reader.string()); + break; + case 6: + if (!(message.int_data && message.int_data.length)) + message.int_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int_data.push(reader.int32()); + } else + message.int_data.push(reader.int32()); + break; + 
case 7: + if (!(message.long_data && message.long_data.length)) + message.long_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.long_data.push(reader.int64()); + } else + message.long_data.push(reader.int64()); + break; + case 8: + if (!(message.bytes_data && message.bytes_data.length)) + message.bytes_data = []; + message.bytes_data.push(reader.bytes()); + break; + case 9: + message.id = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TensorStorage; + })(); + + serialization.Regularizer = (function() { + + function Regularizer(properties) { + this.regularData = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Regularizer.prototype.regularizerType = 0; + Regularizer.prototype.regularData = $util.emptyArray; + + Regularizer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.Regularizer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.regularizerType = reader.int32(); + break; + case 2: + if (!(message.regularData && message.regularData.length)) + message.regularData = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.regularData.push(reader.double()); + } else + message.regularData.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Regularizer; + })(); + + serialization.DataType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "INT32"] = 0; + values[valuesById[1] = "INT64"] = 1; + values[valuesById[2] = "FLOAT"] = 2; + values[valuesById[3] = "DOUBLE"] = 3; + values[valuesById[4] = "STRING"] = 4; + values[valuesById[5] = "BOOL"] = 5; + values[valuesById[6] = "CHAR"] = 6; + values[valuesById[7] = "SHORT"] = 7; + values[valuesById[8] = "BYTES"] = 8; + values[valuesById[9] = "REGULARIZER"] = 9; + values[valuesById[10] = "TENSOR"] = 10; + values[valuesById[11] = "VARIABLE_FORMAT"] = 11; + values[valuesById[12] = "INITMETHOD"] = 12; + values[valuesById[13] = "MODULE"] = 13; + values[valuesById[14] = "NAME_ATTR_LIST"] = 14; + values[valuesById[15] = "ARRAY_VALUE"] = 15; + values[valuesById[16] = "DATA_FORMAT"] = 16; + values[valuesById[17] = "CUSTOM"] = 17; + values[valuesById[18] = "SHAPE"] = 18; + return values; + })(); + + serialization.AttrValue = (function() { + + function AttrValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AttrValue.prototype.dataType = 0; + AttrValue.prototype.subType = ""; + AttrValue.prototype.int32Value = 0; + AttrValue.prototype.int64Value = 
$util.Long ? $util.Long.fromBits(0,0,false) : 0; + AttrValue.prototype.floatValue = 0; + AttrValue.prototype.doubleValue = 0; + AttrValue.prototype.stringValue = ""; + AttrValue.prototype.boolValue = false; + AttrValue.prototype.regularizerValue = null; + AttrValue.prototype.tensorValue = null; + AttrValue.prototype.variableFormatValue = 0; + AttrValue.prototype.initMethodValue = null; + AttrValue.prototype.bigDLModuleValue = null; + AttrValue.prototype.nameAttrListValue = null; + AttrValue.prototype.arrayValue = null; + AttrValue.prototype.dataFormatValue = 0; + AttrValue.prototype.customValue = null; + AttrValue.prototype.shape = null; + + var $oneOfFields; + + Object.defineProperty(AttrValue.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["int32Value", "int64Value", "floatValue", "doubleValue", "stringValue", "boolValue", "regularizerValue", "tensorValue", "variableFormatValue", "initMethodValue", "bigDLModuleValue", "nameAttrListValue", "arrayValue", "dataFormatValue", "customValue", "shape"]), + set: $util.oneOfSetter($oneOfFields) + }); + + AttrValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.AttrValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dataType = reader.int32(); + break; + case 2: + message.subType = reader.string(); + break; + case 3: + message.int32Value = reader.int32(); + break; + case 4: + message.int64Value = reader.int64(); + break; + case 5: + message.floatValue = reader.float(); + break; + case 6: + message.doubleValue = reader.double(); + break; + case 7: + message.stringValue = reader.string(); + break; + case 8: + message.boolValue = reader.bool(); + break; + case 9: + message.regularizerValue = $root.com.intel.analytics.bigdl.serialization.Regularizer.decode(reader, reader.uint32()); + break; + case 10: + message.tensorValue = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()); + break; + case 11: + message.variableFormatValue = reader.int32(); + break; + case 12: + message.initMethodValue = $root.com.intel.analytics.bigdl.serialization.InitMethod.decode(reader, reader.uint32()); + break; + case 13: + message.bigDLModuleValue = $root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32()); + break; + case 14: + message.nameAttrListValue = $root.com.intel.analytics.bigdl.serialization.NameAttrList.decode(reader, reader.uint32()); + break; + case 15: + message.arrayValue = $root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue.decode(reader, reader.uint32()); + break; + case 16: + message.dataFormatValue = reader.int32(); + break; + case 17: + message.customValue = $root.google.protobuf.Any.decode(reader, reader.uint32()); + break; + case 18: + message.shape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + AttrValue.ArrayValue = (function() { + + function 
ArrayValue(properties) { + this.i32 = []; + this.i64 = []; + this.flt = []; + this.dbl = []; + this.str = []; + this.boolean = []; + this.Regularizer = []; + this.tensor = []; + this.variableFormat = []; + this.initMethod = []; + this.bigDLModule = []; + this.nameAttrList = []; + this.dataFormat = []; + this.custom = []; + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArrayValue.prototype.size = 0; + ArrayValue.prototype.datatype = 0; + ArrayValue.prototype.i32 = $util.emptyArray; + ArrayValue.prototype.i64 = $util.emptyArray; + ArrayValue.prototype.flt = $util.emptyArray; + ArrayValue.prototype.dbl = $util.emptyArray; + ArrayValue.prototype.str = $util.emptyArray; + ArrayValue.prototype.boolean = $util.emptyArray; + ArrayValue.prototype.Regularizer = $util.emptyArray; + ArrayValue.prototype.tensor = $util.emptyArray; + ArrayValue.prototype.variableFormat = $util.emptyArray; + ArrayValue.prototype.initMethod = $util.emptyArray; + ArrayValue.prototype.bigDLModule = $util.emptyArray; + ArrayValue.prototype.nameAttrList = $util.emptyArray; + ArrayValue.prototype.dataFormat = $util.emptyArray; + ArrayValue.prototype.custom = $util.emptyArray; + ArrayValue.prototype.shape = $util.emptyArray; + + ArrayValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = reader.int32(); + break; + case 2: + message.datatype = reader.int32(); + break; + case 3: + if (!(message.i32 && message.i32.length)) + message.i32 = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.i32.push(reader.int32()); + } else + message.i32.push(reader.int32()); + break; + case 4: + if (!(message.i64 && message.i64.length)) + message.i64 = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.i64.push(reader.int64()); + } else + message.i64.push(reader.int64()); + break; + case 5: + if (!(message.flt && message.flt.length)) + message.flt = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.flt.push(reader.float()); + } else + message.flt.push(reader.float()); + break; + case 6: + if (!(message.dbl && message.dbl.length)) + message.dbl = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dbl.push(reader.double()); + } else + message.dbl.push(reader.double()); + break; + case 7: + if (!(message.str && message.str.length)) + message.str = []; + message.str.push(reader.string()); + break; + case 8: + if (!(message.boolean && message.boolean.length)) + message.boolean = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.boolean.push(reader.bool()); + } else + message.boolean.push(reader.bool()); + break; + case 9: + if (!(message.Regularizer && message.Regularizer.length)) + message.Regularizer = []; + message.Regularizer.push($root.com.intel.analytics.bigdl.serialization.Regularizer.decode(reader, reader.uint32())); + break; + case 10: + if 
(!(message.tensor && message.tensor.length)) + message.tensor = []; + message.tensor.push($root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32())); + break; + case 11: + if (!(message.variableFormat && message.variableFormat.length)) + message.variableFormat = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.variableFormat.push(reader.int32()); + } else + message.variableFormat.push(reader.int32()); + break; + case 12: + if (!(message.initMethod && message.initMethod.length)) + message.initMethod = []; + message.initMethod.push($root.com.intel.analytics.bigdl.serialization.InitMethod.decode(reader, reader.uint32())); + break; + case 13: + if (!(message.bigDLModule && message.bigDLModule.length)) + message.bigDLModule = []; + message.bigDLModule.push($root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32())); + break; + case 14: + if (!(message.nameAttrList && message.nameAttrList.length)) + message.nameAttrList = []; + message.nameAttrList.push($root.com.intel.analytics.bigdl.serialization.NameAttrList.decode(reader, reader.uint32())); + break; + case 15: + if (!(message.dataFormat && message.dataFormat.length)) + message.dataFormat = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dataFormat.push(reader.int32()); + } else + message.dataFormat.push(reader.int32()); + break; + case 16: + if (!(message.custom && message.custom.length)) + message.custom = []; + message.custom.push($root.google.protobuf.Any.decode(reader, reader.uint32())); + break; + case 17: + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ArrayValue; + })(); + + return AttrValue; + })(); + + 
serialization.NameAttrList = (function() { + + function NameAttrList(properties) { + this.attr = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NameAttrList.prototype.name = ""; + NameAttrList.prototype.attr = $util.emptyObject; + + NameAttrList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.NameAttrList(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + reader.skip().pos++; + if (message.attr === $util.emptyObject) + message.attr = {}; + key = reader.string(); + reader.pos++; + message.attr[key] = $root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NameAttrList; + })(); + + serialization.Shape = (function() { + + function Shape(properties) { + this.shapeValue = []; + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Shape.prototype.shapeType = 0; + Shape.prototype.ssize = 0; + Shape.prototype.shapeValue = $util.emptyArray; + Shape.prototype.shape = $util.emptyArray; + + Shape.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.com.intel.analytics.bigdl.serialization.Shape(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shapeType = reader.int32(); + break; + case 2: + message.ssize = reader.int32(); + break; + case 3: + if (!(message.shapeValue && message.shapeValue.length)) + message.shapeValue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shapeValue.push(reader.int32()); + } else + message.shapeValue.push(reader.int32()); + break; + case 4: + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Shape.ShapeType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "SINGLE"] = 0; + values[valuesById[1] = "MULTI"] = 1; + return values; + })(); + + return Shape; + })(); + + return serialization; + })(); + + return bigdl; + })(); + + return analytics; + })(); + + return intel; + })(); + + return com; + })(); + + $root.google = (function() { + + var google = {}; + + google.protobuf = (function() { + + var protobuf = {}; + + protobuf.Any = (function() { + + function Any(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Any.prototype.type_url = ""; + Any.prototype.value = $util.newBuffer([]); + + Any.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
// Detects and loads BigDL models ('.model' / '.bigdl') for the viewer.
bigdl.ModelFactory = class {

    // Accept the file when its extension matches and a protobuf tag scan of
    // the buffer shows the field numbers used by serialization.BigDLModule.
    // (Returns true on a match; otherwise falls through, yielding a falsy
    // undefined, matching the factory-protocol convention used here.)
    match(context) {
        const filename = context.identifier;
        const extension = filename.split('.').pop().toLowerCase();
        if (extension == 'model' || extension == 'bigdl') {
            const tags = context.tags('pb');
            const required = [2, 7, 8, 9, 10, 11, 12];
            if (required.every((field) => tags.has(field))) {
                return true;
            }
        }
    }

    // Load the generated protobuf module and operator metadata, then decode
    // the serialized module graph from the raw buffer into a bigdl.Model.
    open(context, host) {
        return host.require('./bigdl-proto').then(() => {
            return bigdl.Metadata.open(host).then((metadata) => {
                const identifier = context.identifier;
                try {
                    // https://github.com/intel-analytics/BigDL/blob/master/spark/dl/src/main/resources/serialization/bigdl.proto
                    bigdl.proto = protobuf.roots.bigdl.com.intel.analytics.bigdl.serialization;
                    const module = bigdl.proto.BigDLModule.decode(context.buffer);
                    return new bigdl.Model(metadata, module);
                }
                catch (error) {
                    // Report the failure to the host, then surface a
                    // user-facing error naming the file.
                    host.exception(error, false);
                    const message = error && error.message ? error.message : error.toString();
                    throw new bigdl.Error(message.replace(/\.$/, '') + " in '" + identifier + "'.");
                }
            });
        });
    }
};

// Top-level model wrapper: a format/version string plus a single graph.
bigdl.Model = class {

    constructor(metadata, module) {
        this._version = (module && module.version) ? module.version : '';
        this._graphs = [ new bigdl.Graph(metadata, module) ];
    }

    // e.g. 'BigDL v0.10.0', or plain 'BigDL' when no version was serialized.
    get format() {
        return this._version ? 'BigDL v' + this._version : 'BigDL';
    }

    get graphs() {
        return this._graphs;
    }
};
+ module.namePostfix : module.namePostfix; + for (const submodule of module.subModules) { + this._loadModule(metadata, group, submodule); + } + } + + get groups() { + return this._groups || false; + } + + get type() { + return this._type; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +bigdl.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +bigdl.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new bigdl.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +bigdl.Node = class { + + constructor(metadata, group, module) { + this._metadata = metadata; + this._group = group; + this._type = module.moduleType.split('.').pop(); + this._name = module.name; + this._attributes = []; + this._inputs = []; + this._outputs = []; + this._inputs.push(new bigdl.Parameter('input', module.preModules.map((id) => new bigdl.Argument(id, null, null)))); + const schema = metadata.type(this.type); + const inputs = (schema && schema.inputs) ? 
schema.inputs.slice() : []; + inputs.shift(); + if (module.weight) { + inputs.shift(); + this._inputs.push(new bigdl.Parameter('weight', [ + new bigdl.Argument('', null, new bigdl.Tensor(module.weight)) + ])); + } + if (module.bias) { + inputs.shift(); + this._inputs.push(new bigdl.Parameter('bias', [ + new bigdl.Argument('', null, new bigdl.Tensor(module.bias)) + ])); + } + if (module.parameters && module.parameters.length > 0) { + for (const parameter of module.parameters) { + const input = inputs.shift(); + const inputName = input ? input.name : this._inputs.length.toString(); + this._inputs.push(new bigdl.Parameter(inputName, [ + new bigdl.Argument('', null, new bigdl.Tensor(parameter)) + ])); + } + } + for (const key of Object.keys(module.attr)) { + const value = module.attr[key]; + if (key === 'module_numerics' || key === 'module_tags') { + continue; + } + if (value.dataType === bigdl.proto.DataType.TENSOR) { + if (value.value) { + this._inputs.push(new bigdl.Parameter(key, [ new bigdl.Argument('', null, new bigdl.Tensor(value.tensorValue)) ])); + } + continue; + } + if (value.dataType === bigdl.proto.DataType.REGULARIZER && value.value === undefined) { + continue; + } + if (value.dataType === bigdl.proto.DataType.ARRAY_VALUE && value.arrayValue.datatype === bigdl.proto.DataType.TENSOR) { + this._inputs.push(new bigdl.Parameter(key, value.arrayValue.tensor.map((tensor) => new bigdl.Argument('', null, new bigdl.Tensor(tensor))))); + continue; + } + this._attributes.push(new bigdl.Attribute(metadata.attribute(this._type, key), key, value)); + } + const output = this._name || this._type + module.namePostfix; + this._outputs.push(new bigdl.Parameter('output', [ + new bigdl.Argument(output, null, null) + ])); + } + + get group() { + return this._group; + } + + get type() { + return this._type; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get 
outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +bigdl.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + switch (value.dataType) { + case bigdl.proto.DataType.INT32: { + this._type = 'int32'; + this._value = value.int32Value; + break; + } + case bigdl.proto.DataType.FLOAT: { + this._type = 'float32'; + this._value = value.floatValue; + break; + } + case bigdl.proto.DataType.DOUBLE: { + this._type = 'float64'; + this._value = value.doubleValue; + break; + } + case bigdl.proto.DataType.BOOL: { + this._type = 'boolean'; + this._value = value.boolValue; + break; + } + case bigdl.proto.DataType.REGULARIZER: { + this._value = value.value; + break; + } + case bigdl.proto.DataType.MODULE: { + this._value = value.bigDLModule; + break; + } + case bigdl.proto.DataType.NAME_ATTR_LIST: { + this._value = value.nameAttrListValue; + break; + } + case bigdl.proto.DataType.ARRAY_VALUE: { + switch (value.arrayValue.datatype) { + case bigdl.proto.DataType.INT32: { + this._type = 'int32[]'; + this._value = value.arrayValue.i32; + break; + } + case bigdl.proto.DataType.FLOAT: { + this._type = 'float32[]'; + this._value = value.arrayValue.flt; + break; + } + case bigdl.proto.DataType.STRING: { + this._type = 'string[]'; + this._value = value.arrayValue.str; + break; + } + case bigdl.proto.DataType.TENSOR: { + this._type = 'tensor[]'; + this._value = value.arrayValue.tensor; + break; + } + default: { + throw new bigdl.Error("Unsupported attribute array data type '" + value.arrayValue.datatype + "'."); + } + } + break; + } + case bigdl.proto.DataType.DATA_FORMAT: { + this._dataType = 'InputDataFormat'; + switch (value.dataFormatValue) { + case 0: this._value = 'NCHW'; break; + case 1: this._value = 'NHWC'; break; + } + break; + } + default: { + throw new bigdl.Error("Unsupported attribute data type '" + value.dataType + "'."); + } + } + } + + get type() { + return ''; + } + + get name() { + return 
this._name; + } + + get value() { + return this._value; + } + + get visible() { + return true; + } +}; + +bigdl.Tensor = class { + + constructor(tensor) { + this._type = new bigdl.TensorType(tensor.datatype, new bigdl.TensorShape(tensor.size)); + } + + get kind() { + return 'Parameter'; + } + + get type() { + return this._type; + } + + get state() { + return 'Not supported.'; + } + + get value() { + return null; + } + + toString() { + return ''; + } +}; + +bigdl.TensorType = class { + + constructor(dataType, shape) { + switch (dataType) { + case bigdl.proto.DataType.FLOAT: this._dataType = 'float32'; break; + case bigdl.proto.DataType.DOUBLE: this._dataType = 'float64'; break; + default: throw new bigdl.Error("Unsupported tensor type '" + dataType + "'."); + } + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return (this.dataType || '?') + this._shape.toString(); + } +}; + +bigdl.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions.map((dimension) => { + if (dimension && long.Long.isLong(dimension)) { + return dimension.toNumber(); + } + return dimension; + }); + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? 
('[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']') : ''; + } +}; + +bigdl.Metadata = class { + + static open(host) { + if (bigdl.Metadata._metadata) { + return Promise.resolve(bigdl.Metadata._metadata); + } + return host.request(null, 'bigdl-metadata.json', 'utf-8').then((data) => { + bigdl.Metadata._metadata = new bigdl.Metadata(data); + return bigdl.Metadata._metadata; + }).catch(() => { + bigdl.Metadata._metadata = new bigdl.Metadata(null); + return bigdl.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +bigdl.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading BigDL model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = bigdl.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/bson.js b/frontend/packages/core/public/netron/bson.js new file mode 100644 index 00000000..4f8bc0ac --- /dev/null +++ b/frontend/packages/core/public/netron/bson.js @@ -0,0 +1,161 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental BSON JavaScript reader + +var bson = {}; +var long = long || { Long: require('long') }; + +// http://bsonspec.org/spec.html +bson.Reader = class { + + 
constructor(buffer) { + this._asciiDecoder = new TextDecoder('ascii'); + this._utf8Decoder = new TextDecoder('utf-8'); + this._buffer = buffer; + this._position = 0; + this._view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + } + + read() { + return this.document(); + } + + document(isArray) { + const start = this._position; + const size = this.int32(); + if (size < 5 || start + size > this._buffer.length || this._buffer[start + size - 1] != 0x00) { + throw new bson.Reader('Invalid BSON size.'); + } + let element = isArray ? [] : {}; + let index = 0; + for (;;) { + const type = this.byte(); + if (type == 0x00) { + break; + } + const key = this.cstring(); + let value = null; + switch (type) { + case 0x01: + value = this.double(); + break; + case 0x02: + value = this.string(); + break; + case 0x03: + value = this.document(false); + break; + case 0x04: + value = this.document(true); + break; + case 0x05: + value = this.binary(); + break; + case 0x08: + value = this.boolean(); + break; + case 0x0A: + value = null; + break; + case 0x10: + value = this.int32(); + break; + case 0x11: + value = this.uint64(); + break; + case 0x12: + value = this.int64(); + break; + default: + throw new bson.Error("Unknown value type '" + type + "'."); + } + if (isArray) { + if (index !== parseInt(key, 10)) { + throw new bson.Error("Invalid array index '" + key + "'."); + } + element.push(value); + index++; + } + else { + element[key] = value; + } + } + return element; + } + + cstring() { + const end = this._buffer.indexOf(0x00, this._position); + const value = this._asciiDecoder.decode(this._buffer.subarray(this._position, end)); + this._position = end + 1; + return value; + } + + string() { + const end = this.int32() + this._position - 1; + const value = this._utf8Decoder.decode(this._buffer.subarray(this._position, end)); + this._position = end; + if (this.byte() != '0x00') { + throw new bson.Error('String missing terminal 0.'); + } + return value; + } + + 
binary() { + const size = this.int32(); + const subtype = this.byte(); + const data = this._buffer.subarray(this._position, this._position + size); + this._position += size; + switch (subtype) { + case 0x00: + return data; + default: + throw new bson.Error("Unknown binary subtype '" + subtype + "'."); + } + } + + boolean() { + const value = this.byte(); + switch (value) { + case 0x00: return false; + case 0x01: return true; + default: throw new bson.Error("Invalid boolean value '" + value + "'."); + } + } + + byte() { + return this._buffer[this._position++]; + } + + int32() { + const value = this._view.getInt32(this._position, true); + this._position += 4; + return value; + } + + int64() { + const low = this._view.getUint32(this._position, true); + const hi = this._view.getUint32(this._position + 4, true); + this._position += 8; + return new long.Long(low, hi, false).toNumber(); + } + + uint64() { + const low = this._view.getUint32(this._position, true); + const hi = this._view.getUint32(this._position + 4, true); + this._position += 8; + return new long.Long(low, hi, true).toNumber(); + } +}; + +bson.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'BSON Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Reader = bson.Reader; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/caffe-metadata.json b/frontend/packages/core/public/netron/caffe-metadata.json new file mode 100644 index 00000000..db68a3b9 --- /dev/null +++ b/frontend/packages/core/public/netron/caffe-metadata.json @@ -0,0 +1,525 @@ +[ + { + "name": "Convolution", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": 
false }, + { "name": "num_output", "visible": false }, + { "name": "pad", "default": [0] }, + { "name": "kernel_size", "default": [] }, + { "name": "stride", "default": [1] }, + { "name": "dilation", "default": [] }, + { "name": "group", "default": 1 } + ] + } + }, + { + "name": "Deconvolution", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false }, + { "name": "pad", "default": [] }, + { "name": "kernel_size", "default": [] }, + { "name": "stride", "default": [] }, + { "name": "dilation", "default": [] } + ] + } + }, + { + "name": "DepthwiseConvolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ConvolutionDepthwise", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "pad", "default": [0] }, + { "name": "kernel_size", "default": [] }, + { "name": "stride", "default": [1] }, + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "filter" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "InnerProduct", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } 
+ ], + "attributes": [ + { "name": "bias_term", "visible": false }, + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ] + } + }, + { + "name": "Scale", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "filler", "visible": false }, + { "name": "bias_term", "visible": false }, + { "name": "bias_filler", "visible": false } + ] + } + }, + { + "name": "Dropout", + "schema": { + "category": "Dropout", + "attributes": [ + { "name": "dropout_ratio", "default": 0.5 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Flatten", + "schema": { + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "LRN", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "local_size", "type": "uint32", "default": 5 }, + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": "beta", "type": "float32", "default": 0.75 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "BatchNorm", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "use_global_stats", "visible": false }, + { "name": "eps", "default": 1e-5 } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "BN", + "schema": { + "category": "Normalization", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Sigmoid", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + 
{ + "name": "Softmax", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "SoftmaxLoss", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "labels" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "SoftmaxWithLoss", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "labels" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ContrastiveLossParameter", + "schema": { + "attributes": [ + { "name": "margin", "default": 1.0 }, + { "name": "legacy_version", "default": false } + ] + } + }, + { + "name": "ReLU", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "PReLU", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "slope" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Concat", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Split", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + } + }, + { + "name": "Eltwise", + "schema": { + "attributes": [ + { "name": "operation", "default": 1 } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Pooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "pool", "default": 0 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Crop", + "schema": { + "category": "Data", + "inputs": [ + { "name": "data" }, + { "name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, 
+ { + "name": "Data", + "schema": { + "category": "Data", + "outputs": [ + { "name": "data" }, + { "name": "label" } + ] + } + }, + { + "name": "DummyData", + "schema": { + "category": "Data", + "outputs": [ + { "name": "data" } + ] + } + }, + { + "name": "AnnotatedData", + "schema": { + "category": "Data", + "outputs": [ + { "name": "data" } + ] + } + }, + { + "name": "HDF5Data", + "schema": { + "category": "Data", + "outputs": [ + { "name": "data" } + ] + } + }, + { + "name": "ImageData", + "schema": { + "category": "Data", + "outputs": [ + { "name": "data" }, + { "name": "label" } + ] + } + }, + { + "name": "WindowData", + "schema": { + "category": "Data", + "outputs": [ + { "name": "data" }, + { "name": "label" } + ] + } + }, + { + "name": "Slice", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "axis", "default": 1 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + } + }, + { + "name": "EuclideanLoss", + "schema": { + "inputs": [ + { "name": "predictions" }, + { "name": "targets" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Accuracy", + "schema": { + "inputs": [ + { "name": "predictions" }, + { "name": "labels" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "LSTM", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "h_0" }, + { "name": "c_0" } + ], + "outputs": [ + { "name": "output" }, + { "name": "h_T" }, + { "name": "c_T" } + ], + "attributes": [ + { "name": "weight_filler", "visible": false }, + { "name": "bias_filler", "visible": false }, + { "name": "num_output", "visible": false } + ] + } + }, + { + "name": "Reshape", + "schema": { + "category": "Shape", + "inputs": [ + { "name": "data" } + ], + "outputs": [ + { "name": "reshaped" } + ] + } + }, + { + "name": "ColorConv", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": 
"output" } + ] + } + }, + { + "name": "Permute", + "schema": { + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Parameter", + "schema": { + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Python", + "schema": { + } + } +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/caffe-proto.js b/frontend/packages/core/public/netron/caffe-proto.js new file mode 100644 index 00000000..6a078fb0 --- /dev/null +++ b/frontend/packages/core/public/netron/caffe-proto.js @@ -0,0 +1,6553 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.caffe || ($protobuf.roots.caffe = {}); + + $root.caffe = (function() { + + var caffe = {}; + + caffe.BlobShape = (function() { + + function BlobShape(properties) { + this.dim = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BlobShape.prototype.dim = $util.emptyArray; + + BlobShape.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.BlobShape(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.dim && message.dim.length)) + message.dim = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dim.push(reader.int64()); + } else + message.dim.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BlobShape.decodeText = function decodeText(reader) { + var message = new $root.caffe.BlobShape(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dim": + if (!(message.dim && message.dim.length)) + message.dim = []; + if (reader.first()) + while (!reader.last()) { + message.dim.push(reader.int64()); + reader.next(); + } + else + message.dim.push(reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BlobShape; + })(); + + caffe.BlobProto = (function() { + + function BlobProto(properties) { + this.data = []; + this.diff = []; + this.double_data = []; + this.double_diff = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BlobProto.prototype.shape = null; + BlobProto.prototype.data = $util.emptyArray; + BlobProto.prototype.diff = $util.emptyArray; + BlobProto.prototype.double_data = $util.emptyArray; + BlobProto.prototype.double_diff = $util.emptyArray; + BlobProto.prototype.num = 0; + BlobProto.prototype.channels = 0; + BlobProto.prototype.height = 0; + BlobProto.prototype.width = 0; + + BlobProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.BlobProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 7: + message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32()); + break; + case 5: + if (!(message.data && message.data.length)) + message.data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + if (message.data.length == 0 && (end2 - reader.pos) > 1048576) { + var dataLength = end2 - reader.pos; + var dataView = new DataView(reader.buf.buffer, reader.buf.byteOffset + reader.pos, dataLength); + dataLength = dataLength >>> 2; + var data = new Float32Array(dataLength); + for (var i = 0; i < dataLength; i++) { + data[i] = dataView.getFloat32(i << 2, true); + } + message.data = data; + reader.pos = end2; + } + else { + while (reader.pos < end2) + message.data.push(reader.float()); + } + } else + message.data.push(reader.float()); + break; + case 6: + if (!(message.diff && message.diff.length)) + message.diff = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.diff.push(reader.float()); + } else + message.diff.push(reader.float()); + break; + case 8: + if (!(message.double_data && message.double_data.length)) + message.double_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.double_data.push(reader.double()); + } else + message.double_data.push(reader.double()); + break; + case 9: + if (!(message.double_diff && message.double_diff.length)) + message.double_diff = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.double_diff.push(reader.double()); + } else + message.double_diff.push(reader.double()); + break; + case 1: + message.num = reader.int32(); + break; + case 2: + message.channels = reader.int32(); + break; + case 3: + message.height = reader.int32(); + break; + case 4: + message.width 
= reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BlobProto.decodeText = function decodeText(reader) { + var message = new $root.caffe.BlobProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe.BlobShape.decodeText(reader, true); + break; + case "data": + if (!(message.data && message.data.length)) + message.data = []; + if (reader.first()) + while (!reader.last()) { + message.data.push(reader.float()); + reader.next(); + } + else + message.data.push(reader.float()); + break; + case "diff": + if (!(message.diff && message.diff.length)) + message.diff = []; + if (reader.first()) + while (!reader.last()) { + message.diff.push(reader.float()); + reader.next(); + } + else + message.diff.push(reader.float()); + break; + case "double_data": + if (!(message.double_data && message.double_data.length)) + message.double_data = []; + if (reader.first()) + while (!reader.last()) { + message.double_data.push(reader.double()); + reader.next(); + } + else + message.double_data.push(reader.double()); + break; + case "double_diff": + if (!(message.double_diff && message.double_diff.length)) + message.double_diff = []; + if (reader.first()) + while (!reader.last()) { + message.double_diff.push(reader.double()); + reader.next(); + } + else + message.double_diff.push(reader.double()); + break; + case "num": + message.num = reader.int32(); + break; + case "channels": + message.channels = reader.int32(); + break; + case "height": + message.height = reader.int32(); + break; + case "width": + message.width = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BlobProto; + })(); + + caffe.BlobProtoVector = (function() { + + function BlobProtoVector(properties) { + this.blobs = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if 
(properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BlobProtoVector.prototype.blobs = $util.emptyArray; + + BlobProtoVector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.BlobProtoVector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BlobProtoVector.decodeText = function decodeText(reader) { + var message = new $root.caffe.BlobProtoVector(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "blobs": + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BlobProtoVector; + })(); + + caffe.Datum = (function() { + + function Datum(properties) { + this.float_data = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Datum.prototype.channels = 0; + Datum.prototype.height = 0; + Datum.prototype.width = 0; + Datum.prototype.data = $util.newBuffer([]); + Datum.prototype.label = 0; + Datum.prototype.float_data = $util.emptyArray; + Datum.prototype.encoded = false; + + Datum.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.Datum(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.channels = reader.int32(); + break; + case 2: + message.height = reader.int32(); + break; + case 3: + message.width = reader.int32(); + break; + case 4: + message.data = reader.bytes(); + break; + case 5: + message.label = reader.int32(); + break; + case 6: + if (!(message.float_data && message.float_data.length)) + message.float_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.float_data.push(reader.float()); + } else + message.float_data.push(reader.float()); + break; + case 7: + message.encoded = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Datum.decodeText = function decodeText(reader) { + var message = new $root.caffe.Datum(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "channels": + message.channels = reader.int32(); + break; + case "height": + message.height = reader.int32(); + break; + case "width": + message.width = reader.int32(); + break; + case "data": + message.data = reader.bytes(); + break; + case "label": + message.label = reader.int32(); + break; + case "float_data": + if (!(message.float_data && message.float_data.length)) + message.float_data = []; + if (reader.first()) + while (!reader.last()) { + message.float_data.push(reader.float()); + reader.next(); + } + else + message.float_data.push(reader.float()); + break; + case "encoded": + message.encoded = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Datum; + })(); + + caffe.FillerParameter = (function() { + + function FillerParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + 
this[keys[i]] = properties[keys[i]]; + } + + FillerParameter.prototype.type = "constant"; + FillerParameter.prototype.value = 0; + FillerParameter.prototype.min = 0; + FillerParameter.prototype.max = 1; + FillerParameter.prototype.mean = 0; + FillerParameter.prototype.std = 1; + FillerParameter.prototype.sparse = -1; + FillerParameter.prototype.variance_norm = 0; + + FillerParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.FillerParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.string(); + break; + case 2: + message.value = reader.float(); + break; + case 3: + message.min = reader.float(); + break; + case 4: + message.max = reader.float(); + break; + case 5: + message.mean = reader.float(); + break; + case 6: + message.std = reader.float(); + break; + case 7: + message.sparse = reader.int32(); + break; + case 8: + message.variance_norm = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FillerParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.FillerParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "type": + message.type = reader.string(); + break; + case "value": + message.value = reader.float(); + break; + case "min": + message.min = reader.float(); + break; + case "max": + message.max = reader.float(); + break; + case "mean": + message.mean = reader.float(); + break; + case "std": + message.std = reader.float(); + break; + case "sparse": + message.sparse = reader.int32(); + break; + case "variance_norm": + message.variance_norm = reader.enum($root.caffe.FillerParameter.VarianceNorm); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + 
FillerParameter.VarianceNorm = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "FAN_IN"] = 0; + values[valuesById[1] = "FAN_OUT"] = 1; + values[valuesById[2] = "AVERAGE"] = 2; + return values; + })(); + + return FillerParameter; + })(); + + caffe.NetParameter = (function() { + + function NetParameter(properties) { + this.input = []; + this.input_shape = []; + this.input_dim = []; + this.layer = []; + this.layers = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NetParameter.prototype.name = ""; + NetParameter.prototype.input = $util.emptyArray; + NetParameter.prototype.input_shape = $util.emptyArray; + NetParameter.prototype.input_dim = $util.emptyArray; + NetParameter.prototype.force_backward = false; + NetParameter.prototype.state = null; + NetParameter.prototype.debug_info = false; + NetParameter.prototype.layer = $util.emptyArray; + NetParameter.prototype.layers = $util.emptyArray; + + NetParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.NetParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 3: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + case 8: + if (!(message.input_shape && message.input_shape.length)) + message.input_shape = []; + message.input_shape.push($root.caffe.BlobShape.decode(reader, reader.uint32())); + break; + case 4: + if (!(message.input_dim && message.input_dim.length)) + message.input_dim = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.input_dim.push(reader.int32()); + } else + message.input_dim.push(reader.int32()); + break; + case 5: + message.force_backward = reader.bool(); + break; + case 6: + message.state = $root.caffe.NetState.decode(reader, reader.uint32()); + break; + case 7: + message.debug_info = reader.bool(); + break; + case 100: + if (!(message.layer && message.layer.length)) + message.layer = []; + message.layer.push($root.caffe.LayerParameter.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.layers && message.layers.length)) + message.layers = []; + message.layers.push($root.caffe.V1LayerParameter.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NetParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.NetParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input": + if (!(message.input && message.input.length)) + message.input = []; + if (reader.first()) + while (!reader.last()) { + message.input.push(reader.string()); + reader.next(); + } + else + message.input.push(reader.string()); + break; + case "input_shape": + if 
(!(message.input_shape && message.input_shape.length)) + message.input_shape = []; + message.input_shape.push($root.caffe.BlobShape.decodeText(reader, true)); + break; + case "input_dim": + if (!(message.input_dim && message.input_dim.length)) + message.input_dim = []; + if (reader.first()) + while (!reader.last()) { + message.input_dim.push(reader.int32()); + reader.next(); + } + else + message.input_dim.push(reader.int32()); + break; + case "force_backward": + message.force_backward = reader.bool(); + break; + case "state": + message.state = $root.caffe.NetState.decodeText(reader, true); + break; + case "debug_info": + message.debug_info = reader.bool(); + break; + case "layer": + if (!(message.layer && message.layer.length)) + message.layer = []; + message.layer.push($root.caffe.LayerParameter.decodeText(reader, true)); + break; + case "layers": + if (!(message.layers && message.layers.length)) + message.layers = []; + message.layers.push($root.caffe.V1LayerParameter.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NetParameter; + })(); + + caffe.SolverParameter = (function() { + + function SolverParameter(properties) { + this.test_net = []; + this.test_net_param = []; + this.test_state = []; + this.test_iter = []; + this.stepvalue = []; + this.weights = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SolverParameter.prototype.net = ""; + SolverParameter.prototype.net_param = null; + SolverParameter.prototype.train_net = ""; + SolverParameter.prototype.test_net = $util.emptyArray; + SolverParameter.prototype.train_net_param = null; + SolverParameter.prototype.test_net_param = $util.emptyArray; + SolverParameter.prototype.train_state = null; + SolverParameter.prototype.test_state = $util.emptyArray; + SolverParameter.prototype.test_iter = $util.emptyArray; + 
SolverParameter.prototype.test_interval = 0; + SolverParameter.prototype.test_compute_loss = false; + SolverParameter.prototype.test_initialization = true; + SolverParameter.prototype.base_lr = 0; + SolverParameter.prototype.display = 0; + SolverParameter.prototype.average_loss = 1; + SolverParameter.prototype.max_iter = 0; + SolverParameter.prototype.iter_size = 1; + SolverParameter.prototype.lr_policy = ""; + SolverParameter.prototype.gamma = 0; + SolverParameter.prototype.power = 0; + SolverParameter.prototype.momentum = 0; + SolverParameter.prototype.weight_decay = 0; + SolverParameter.prototype.regularization_type = "L2"; + SolverParameter.prototype.stepsize = 0; + SolverParameter.prototype.stepvalue = $util.emptyArray; + SolverParameter.prototype.clip_gradients = -1; + SolverParameter.prototype.snapshot = 0; + SolverParameter.prototype.snapshot_prefix = ""; + SolverParameter.prototype.snapshot_diff = false; + SolverParameter.prototype.snapshot_format = 1; + SolverParameter.prototype.solver_mode = 1; + SolverParameter.prototype.device_id = 0; + SolverParameter.prototype.random_seed = $util.Long ? $util.Long.fromBits(-1,-1,false) : -1; + SolverParameter.prototype.type = "SGD"; + SolverParameter.prototype.delta = 1e-8; + SolverParameter.prototype.momentum2 = 0.999; + SolverParameter.prototype.rms_decay = 0.99; + SolverParameter.prototype.debug_info = false; + SolverParameter.prototype.snapshot_after_train = true; + SolverParameter.prototype.solver_type = 0; + SolverParameter.prototype.layer_wise_reduce = true; + SolverParameter.prototype.weights = $util.emptyArray; + + SolverParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.SolverParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 24: + message.net = reader.string(); + break; + case 25: + message.net_param = $root.caffe.NetParameter.decode(reader, reader.uint32()); + break; + case 1: + message.train_net = reader.string(); + break; + case 2: + if (!(message.test_net && message.test_net.length)) + message.test_net = []; + message.test_net.push(reader.string()); + break; + case 21: + message.train_net_param = $root.caffe.NetParameter.decode(reader, reader.uint32()); + break; + case 22: + if (!(message.test_net_param && message.test_net_param.length)) + message.test_net_param = []; + message.test_net_param.push($root.caffe.NetParameter.decode(reader, reader.uint32())); + break; + case 26: + message.train_state = $root.caffe.NetState.decode(reader, reader.uint32()); + break; + case 27: + if (!(message.test_state && message.test_state.length)) + message.test_state = []; + message.test_state.push($root.caffe.NetState.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.test_iter && message.test_iter.length)) + message.test_iter = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.test_iter.push(reader.int32()); + } else + message.test_iter.push(reader.int32()); + break; + case 4: + message.test_interval = reader.int32(); + break; + case 19: + message.test_compute_loss = reader.bool(); + break; + case 32: + message.test_initialization = reader.bool(); + break; + case 5: + message.base_lr = reader.float(); + break; + case 6: + message.display = reader.int32(); + break; + case 33: + message.average_loss = reader.int32(); + break; + case 7: + message.max_iter = reader.int32(); + break; + case 36: + message.iter_size = reader.int32(); + break; + case 8: + message.lr_policy = reader.string(); + break; + case 9: + message.gamma = reader.float(); + break; + case 10: + 
message.power = reader.float(); + break; + case 11: + message.momentum = reader.float(); + break; + case 12: + message.weight_decay = reader.float(); + break; + case 29: + message.regularization_type = reader.string(); + break; + case 13: + message.stepsize = reader.int32(); + break; + case 34: + if (!(message.stepvalue && message.stepvalue.length)) + message.stepvalue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.stepvalue.push(reader.int32()); + } else + message.stepvalue.push(reader.int32()); + break; + case 35: + message.clip_gradients = reader.float(); + break; + case 14: + message.snapshot = reader.int32(); + break; + case 15: + message.snapshot_prefix = reader.string(); + break; + case 16: + message.snapshot_diff = reader.bool(); + break; + case 37: + message.snapshot_format = reader.int32(); + break; + case 17: + message.solver_mode = reader.int32(); + break; + case 18: + message.device_id = reader.int32(); + break; + case 20: + message.random_seed = reader.int64(); + break; + case 40: + message.type = reader.string(); + break; + case 31: + message.delta = reader.float(); + break; + case 39: + message.momentum2 = reader.float(); + break; + case 38: + message.rms_decay = reader.float(); + break; + case 23: + message.debug_info = reader.bool(); + break; + case 28: + message.snapshot_after_train = reader.bool(); + break; + case 30: + message.solver_type = reader.int32(); + break; + case 41: + message.layer_wise_reduce = reader.bool(); + break; + case 42: + if (!(message.weights && message.weights.length)) + message.weights = []; + message.weights.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SolverParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.SolverParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "net": + message.net = 
reader.string(); + break; + case "net_param": + message.net_param = $root.caffe.NetParameter.decodeText(reader, true); + break; + case "train_net": + message.train_net = reader.string(); + break; + case "test_net": + if (!(message.test_net && message.test_net.length)) + message.test_net = []; + if (reader.first()) + while (!reader.last()) { + message.test_net.push(reader.string()); + reader.next(); + } + else + message.test_net.push(reader.string()); + break; + case "train_net_param": + message.train_net_param = $root.caffe.NetParameter.decodeText(reader, true); + break; + case "test_net_param": + if (!(message.test_net_param && message.test_net_param.length)) + message.test_net_param = []; + message.test_net_param.push($root.caffe.NetParameter.decodeText(reader, true)); + break; + case "train_state": + message.train_state = $root.caffe.NetState.decodeText(reader, true); + break; + case "test_state": + if (!(message.test_state && message.test_state.length)) + message.test_state = []; + message.test_state.push($root.caffe.NetState.decodeText(reader, true)); + break; + case "test_iter": + if (!(message.test_iter && message.test_iter.length)) + message.test_iter = []; + if (reader.first()) + while (!reader.last()) { + message.test_iter.push(reader.int32()); + reader.next(); + } + else + message.test_iter.push(reader.int32()); + break; + case "test_interval": + message.test_interval = reader.int32(); + break; + case "test_compute_loss": + message.test_compute_loss = reader.bool(); + break; + case "test_initialization": + message.test_initialization = reader.bool(); + break; + case "base_lr": + message.base_lr = reader.float(); + break; + case "display": + message.display = reader.int32(); + break; + case "average_loss": + message.average_loss = reader.int32(); + break; + case "max_iter": + message.max_iter = reader.int32(); + break; + case "iter_size": + message.iter_size = reader.int32(); + break; + case "lr_policy": + message.lr_policy = reader.string(); + break; + 
case "gamma": + message.gamma = reader.float(); + break; + case "power": + message.power = reader.float(); + break; + case "momentum": + message.momentum = reader.float(); + break; + case "weight_decay": + message.weight_decay = reader.float(); + break; + case "regularization_type": + message.regularization_type = reader.string(); + break; + case "stepsize": + message.stepsize = reader.int32(); + break; + case "stepvalue": + if (!(message.stepvalue && message.stepvalue.length)) + message.stepvalue = []; + if (reader.first()) + while (!reader.last()) { + message.stepvalue.push(reader.int32()); + reader.next(); + } + else + message.stepvalue.push(reader.int32()); + break; + case "clip_gradients": + message.clip_gradients = reader.float(); + break; + case "snapshot": + message.snapshot = reader.int32(); + break; + case "snapshot_prefix": + message.snapshot_prefix = reader.string(); + break; + case "snapshot_diff": + message.snapshot_diff = reader.bool(); + break; + case "snapshot_format": + message.snapshot_format = reader.enum($root.caffe.SolverParameter.SnapshotFormat); + break; + case "solver_mode": + message.solver_mode = reader.enum($root.caffe.SolverParameter.SolverMode); + break; + case "device_id": + message.device_id = reader.int32(); + break; + case "random_seed": + message.random_seed = reader.int64(); + break; + case "type": + message.type = reader.string(); + break; + case "delta": + message.delta = reader.float(); + break; + case "momentum2": + message.momentum2 = reader.float(); + break; + case "rms_decay": + message.rms_decay = reader.float(); + break; + case "debug_info": + message.debug_info = reader.bool(); + break; + case "snapshot_after_train": + message.snapshot_after_train = reader.bool(); + break; + case "solver_type": + message.solver_type = reader.enum($root.caffe.SolverParameter.SolverType); + break; + case "layer_wise_reduce": + message.layer_wise_reduce = reader.bool(); + break; + case "weights": + if (!(message.weights && 
message.weights.length)) + message.weights = []; + if (reader.first()) + while (!reader.last()) { + message.weights.push(reader.string()); + reader.next(); + } + else + message.weights.push(reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + SolverParameter.SnapshotFormat = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "HDF5"] = 0; + values[valuesById[1] = "BINARYPROTO"] = 1; + return values; + })(); + + SolverParameter.SolverMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CPU"] = 0; + values[valuesById[1] = "GPU"] = 1; + return values; + })(); + + SolverParameter.SolverType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "SGD"] = 0; + values[valuesById[1] = "NESTEROV"] = 1; + values[valuesById[2] = "ADAGRAD"] = 2; + values[valuesById[3] = "RMSPROP"] = 3; + values[valuesById[4] = "ADADELTA"] = 4; + values[valuesById[5] = "ADAM"] = 5; + return values; + })(); + + return SolverParameter; + })(); + + caffe.SolverState = (function() { + + function SolverState(properties) { + this.history = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SolverState.prototype.iter = 0; + SolverState.prototype.learned_net = ""; + SolverState.prototype.history = $util.emptyArray; + SolverState.prototype.current_step = 0; + + SolverState.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.SolverState(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.iter = reader.int32(); + break; + case 2: + message.learned_net = reader.string(); + break; + case 3: + if (!(message.history && message.history.length)) + message.history = []; + message.history.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 4: + message.current_step = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SolverState.decodeText = function decodeText(reader) { + var message = new $root.caffe.SolverState(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "iter": + message.iter = reader.int32(); + break; + case "learned_net": + message.learned_net = reader.string(); + break; + case "history": + if (!(message.history && message.history.length)) + message.history = []; + message.history.push($root.caffe.BlobProto.decodeText(reader, true)); + break; + case "current_step": + message.current_step = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SolverState; + })(); + + caffe.Phase = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "TRAIN"] = 0; + values[valuesById[1] = "TEST"] = 1; + return values; + })(); + + caffe.NetState = (function() { + + function NetState(properties) { + this.stage = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NetState.prototype.phase = 1; + NetState.prototype.level = 0; + NetState.prototype.stage = $util.emptyArray; + + NetState.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.NetState(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.phase = reader.int32(); + break; + case 2: + message.level = reader.int32(); + break; + case 3: + if (!(message.stage && message.stage.length)) + message.stage = []; + message.stage.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NetState.decodeText = function decodeText(reader) { + var message = new $root.caffe.NetState(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "phase": + message.phase = reader.enum($root.caffe.Phase); + break; + case "level": + message.level = reader.int32(); + break; + case "stage": + if (!(message.stage && message.stage.length)) + message.stage = []; + if (reader.first()) + while (!reader.last()) { + message.stage.push(reader.string()); + reader.next(); + } + else + message.stage.push(reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NetState; + })(); + + caffe.NetStateRule = (function() { + + function NetStateRule(properties) { + this.stage = []; + this.not_stage = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NetStateRule.prototype.phase = 0; + NetStateRule.prototype.min_level = 0; + NetStateRule.prototype.max_level = 0; + NetStateRule.prototype.stage = $util.emptyArray; + NetStateRule.prototype.not_stage = $util.emptyArray; + + NetStateRule.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.NetStateRule(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.phase = reader.int32(); + break; + case 2: + message.min_level = reader.int32(); + break; + case 3: + message.max_level = reader.int32(); + break; + case 4: + if (!(message.stage && message.stage.length)) + message.stage = []; + message.stage.push(reader.string()); + break; + case 5: + if (!(message.not_stage && message.not_stage.length)) + message.not_stage = []; + message.not_stage.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NetStateRule.decodeText = function decodeText(reader) { + var message = new $root.caffe.NetStateRule(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "phase": + message.phase = reader.enum($root.caffe.Phase); + break; + case "min_level": + message.min_level = reader.int32(); + break; + case "max_level": + message.max_level = reader.int32(); + break; + case "stage": + if (!(message.stage && message.stage.length)) + message.stage = []; + if (reader.first()) + while (!reader.last()) { + message.stage.push(reader.string()); + reader.next(); + } + else + message.stage.push(reader.string()); + break; + case "not_stage": + if (!(message.not_stage && message.not_stage.length)) + message.not_stage = []; + if (reader.first()) + while (!reader.last()) { + message.not_stage.push(reader.string()); + reader.next(); + } + else + message.not_stage.push(reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NetStateRule; + })(); + + caffe.ParamSpec = (function() { + + function ParamSpec(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ParamSpec.prototype.name = ""; + 
ParamSpec.prototype.share_mode = 0; + ParamSpec.prototype.lr_mult = 1; + ParamSpec.prototype.decay_mult = 1; + + ParamSpec.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.ParamSpec(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.share_mode = reader.int32(); + break; + case 3: + message.lr_mult = reader.float(); + break; + case 4: + message.decay_mult = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ParamSpec.decodeText = function decodeText(reader) { + var message = new $root.caffe.ParamSpec(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "share_mode": + message.share_mode = reader.enum($root.caffe.ParamSpec.DimCheckMode); + break; + case "lr_mult": + message.lr_mult = reader.float(); + break; + case "decay_mult": + message.decay_mult = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + ParamSpec.DimCheckMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "STRICT"] = 0; + values[valuesById[1] = "PERMISSIVE"] = 1; + return values; + })(); + + return ParamSpec; + })(); + + caffe.LayerParameter = (function() { + + function LayerParameter(properties) { + this.bottom = []; + this.top = []; + this.loss_weight = []; + this.param = []; + this.blobs = []; + this.propagate_down = []; + this.include = []; + this.exclude = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LayerParameter.prototype.name = ""; + 
LayerParameter.prototype.type = ""; + LayerParameter.prototype.bottom = $util.emptyArray; + LayerParameter.prototype.top = $util.emptyArray; + LayerParameter.prototype.phase = 0; + LayerParameter.prototype.loss_weight = $util.emptyArray; + LayerParameter.prototype.param = $util.emptyArray; + LayerParameter.prototype.blobs = $util.emptyArray; + LayerParameter.prototype.propagate_down = $util.emptyArray; + LayerParameter.prototype.include = $util.emptyArray; + LayerParameter.prototype.exclude = $util.emptyArray; + LayerParameter.prototype.transform_param = null; + LayerParameter.prototype.loss_param = null; + LayerParameter.prototype.accuracy_param = null; + LayerParameter.prototype.argmax_param = null; + LayerParameter.prototype.batch_norm_param = null; + LayerParameter.prototype.bias_param = null; + LayerParameter.prototype.clip_param = null; + LayerParameter.prototype.concat_param = null; + LayerParameter.prototype.contrastive_loss_param = null; + LayerParameter.prototype.convolution_param = null; + LayerParameter.prototype.crop_param = null; + LayerParameter.prototype.data_param = null; + LayerParameter.prototype.dropout_param = null; + LayerParameter.prototype.dummy_data_param = null; + LayerParameter.prototype.eltwise_param = null; + LayerParameter.prototype.elu_param = null; + LayerParameter.prototype.embed_param = null; + LayerParameter.prototype.exp_param = null; + LayerParameter.prototype.flatten_param = null; + LayerParameter.prototype.hdf5_data_param = null; + LayerParameter.prototype.hdf5_output_param = null; + LayerParameter.prototype.hinge_loss_param = null; + LayerParameter.prototype.image_data_param = null; + LayerParameter.prototype.infogain_loss_param = null; + LayerParameter.prototype.inner_product_param = null; + LayerParameter.prototype.input_param = null; + LayerParameter.prototype.log_param = null; + LayerParameter.prototype.lrn_param = null; + LayerParameter.prototype.memory_data_param = null; + LayerParameter.prototype.mvn_param = null; + 
LayerParameter.prototype.parameter_param = null; + LayerParameter.prototype.pooling_param = null; + LayerParameter.prototype.power_param = null; + LayerParameter.prototype.prelu_param = null; + LayerParameter.prototype.python_param = null; + LayerParameter.prototype.recurrent_param = null; + LayerParameter.prototype.reduction_param = null; + LayerParameter.prototype.relu_param = null; + LayerParameter.prototype.reshape_param = null; + LayerParameter.prototype.scale_param = null; + LayerParameter.prototype.sigmoid_param = null; + LayerParameter.prototype.softmax_param = null; + LayerParameter.prototype.spp_param = null; + LayerParameter.prototype.slice_param = null; + LayerParameter.prototype.swish_param = null; + LayerParameter.prototype.tanh_param = null; + LayerParameter.prototype.threshold_param = null; + LayerParameter.prototype.tile_param = null; + LayerParameter.prototype.window_data_param = null; + + LayerParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.LayerParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + if (!(message.bottom && message.bottom.length)) + message.bottom = []; + message.bottom.push(reader.string()); + break; + case 4: + if (!(message.top && message.top.length)) + message.top = []; + message.top.push(reader.string()); + break; + case 10: + message.phase = reader.int32(); + break; + case 5: + if (!(message.loss_weight && message.loss_weight.length)) + message.loss_weight = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.loss_weight.push(reader.float()); + } else + message.loss_weight.push(reader.float()); + break; + case 6: + if (!(message.param && message.param.length)) + message.param = []; + message.param.push($root.caffe.ParamSpec.decode(reader, reader.uint32())); + break; + case 7: + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 11: + if (!(message.propagate_down && message.propagate_down.length)) + message.propagate_down = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.propagate_down.push(reader.bool()); + } else + message.propagate_down.push(reader.bool()); + break; + case 8: + if (!(message.include && message.include.length)) + message.include = []; + message.include.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 9: + if (!(message.exclude && message.exclude.length)) + message.exclude = []; + message.exclude.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 100: + message.transform_param = $root.caffe.TransformationParameter.decode(reader, reader.uint32()); + break; + case 
101: + message.loss_param = $root.caffe.LossParameter.decode(reader, reader.uint32()); + break; + case 102: + message.accuracy_param = $root.caffe.AccuracyParameter.decode(reader, reader.uint32()); + break; + case 103: + message.argmax_param = $root.caffe.ArgMaxParameter.decode(reader, reader.uint32()); + break; + case 139: + message.batch_norm_param = $root.caffe.BatchNormParameter.decode(reader, reader.uint32()); + break; + case 141: + message.bias_param = $root.caffe.BiasParameter.decode(reader, reader.uint32()); + break; + case 148: + message.clip_param = $root.caffe.ClipParameter.decode(reader, reader.uint32()); + break; + case 104: + message.concat_param = $root.caffe.ConcatParameter.decode(reader, reader.uint32()); + break; + case 105: + message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decode(reader, reader.uint32()); + break; + case 106: + message.convolution_param = $root.caffe.ConvolutionParameter.decode(reader, reader.uint32()); + break; + case 144: + message.crop_param = $root.caffe.CropParameter.decode(reader, reader.uint32()); + break; + case 107: + message.data_param = $root.caffe.DataParameter.decode(reader, reader.uint32()); + break; + case 108: + message.dropout_param = $root.caffe.DropoutParameter.decode(reader, reader.uint32()); + break; + case 109: + message.dummy_data_param = $root.caffe.DummyDataParameter.decode(reader, reader.uint32()); + break; + case 110: + message.eltwise_param = $root.caffe.EltwiseParameter.decode(reader, reader.uint32()); + break; + case 140: + message.elu_param = $root.caffe.ELUParameter.decode(reader, reader.uint32()); + break; + case 137: + message.embed_param = $root.caffe.EmbedParameter.decode(reader, reader.uint32()); + break; + case 111: + message.exp_param = $root.caffe.ExpParameter.decode(reader, reader.uint32()); + break; + case 135: + message.flatten_param = $root.caffe.FlattenParameter.decode(reader, reader.uint32()); + break; + case 112: + message.hdf5_data_param = 
$root.caffe.HDF5DataParameter.decode(reader, reader.uint32()); + break; + case 113: + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32()); + break; + case 114: + message.hinge_loss_param = $root.caffe.HingeLossParameter.decode(reader, reader.uint32()); + break; + case 115: + message.image_data_param = $root.caffe.ImageDataParameter.decode(reader, reader.uint32()); + break; + case 116: + message.infogain_loss_param = $root.caffe.InfogainLossParameter.decode(reader, reader.uint32()); + break; + case 117: + message.inner_product_param = $root.caffe.InnerProductParameter.decode(reader, reader.uint32()); + break; + case 143: + message.input_param = $root.caffe.InputParameter.decode(reader, reader.uint32()); + break; + case 134: + message.log_param = $root.caffe.LogParameter.decode(reader, reader.uint32()); + break; + case 118: + message.lrn_param = $root.caffe.LRNParameter.decode(reader, reader.uint32()); + break; + case 119: + message.memory_data_param = $root.caffe.MemoryDataParameter.decode(reader, reader.uint32()); + break; + case 120: + message.mvn_param = $root.caffe.MVNParameter.decode(reader, reader.uint32()); + break; + case 145: + message.parameter_param = $root.caffe.ParameterParameter.decode(reader, reader.uint32()); + break; + case 121: + message.pooling_param = $root.caffe.PoolingParameter.decode(reader, reader.uint32()); + break; + case 122: + message.power_param = $root.caffe.PowerParameter.decode(reader, reader.uint32()); + break; + case 131: + message.prelu_param = $root.caffe.PReLUParameter.decode(reader, reader.uint32()); + break; + case 130: + message.python_param = $root.caffe.PythonParameter.decode(reader, reader.uint32()); + break; + case 146: + message.recurrent_param = $root.caffe.RecurrentParameter.decode(reader, reader.uint32()); + break; + case 136: + message.reduction_param = $root.caffe.ReductionParameter.decode(reader, reader.uint32()); + break; + case 123: + message.relu_param = 
$root.caffe.ReLUParameter.decode(reader, reader.uint32()); + break; + case 133: + message.reshape_param = $root.caffe.ReshapeParameter.decode(reader, reader.uint32()); + break; + case 142: + message.scale_param = $root.caffe.ScaleParameter.decode(reader, reader.uint32()); + break; + case 124: + message.sigmoid_param = $root.caffe.SigmoidParameter.decode(reader, reader.uint32()); + break; + case 125: + message.softmax_param = $root.caffe.SoftmaxParameter.decode(reader, reader.uint32()); + break; + case 132: + message.spp_param = $root.caffe.SPPParameter.decode(reader, reader.uint32()); + break; + case 126: + message.slice_param = $root.caffe.SliceParameter.decode(reader, reader.uint32()); + break; + case 147: + message.swish_param = $root.caffe.SwishParameter.decode(reader, reader.uint32()); + break; + case 127: + message.tanh_param = $root.caffe.TanHParameter.decode(reader, reader.uint32()); + break; + case 128: + message.threshold_param = $root.caffe.ThresholdParameter.decode(reader, reader.uint32()); + break; + case 138: + message.tile_param = $root.caffe.TileParameter.decode(reader, reader.uint32()); + break; + case 129: + message.window_data_param = $root.caffe.WindowDataParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + LayerParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.LayerParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "bottom": + if (!(message.bottom && message.bottom.length)) + message.bottom = []; + if (reader.first()) + while (!reader.last()) { + message.bottom.push(reader.string()); + reader.next(); + } + else + message.bottom.push(reader.string()); + break; + case "top": + if (!(message.top && message.top.length)) + message.top = []; + if (reader.first()) + 
while (!reader.last()) { + message.top.push(reader.string()); + reader.next(); + } + else + message.top.push(reader.string()); + break; + case "phase": + message.phase = reader.enum($root.caffe.Phase); + break; + case "loss_weight": + if (!(message.loss_weight && message.loss_weight.length)) + message.loss_weight = []; + if (reader.first()) + while (!reader.last()) { + message.loss_weight.push(reader.float()); + reader.next(); + } + else + message.loss_weight.push(reader.float()); + break; + case "param": + if (!(message.param && message.param.length)) + message.param = []; + message.param.push($root.caffe.ParamSpec.decodeText(reader, true)); + break; + case "blobs": + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decodeText(reader, true)); + break; + case "propagate_down": + if (!(message.propagate_down && message.propagate_down.length)) + message.propagate_down = []; + if (reader.first()) + while (!reader.last()) { + message.propagate_down.push(reader.bool()); + reader.next(); + } + else + message.propagate_down.push(reader.bool()); + break; + case "include": + if (!(message.include && message.include.length)) + message.include = []; + message.include.push($root.caffe.NetStateRule.decodeText(reader, true)); + break; + case "exclude": + if (!(message.exclude && message.exclude.length)) + message.exclude = []; + message.exclude.push($root.caffe.NetStateRule.decodeText(reader, true)); + break; + case "transform_param": + message.transform_param = $root.caffe.TransformationParameter.decodeText(reader, true); + break; + case "loss_param": + message.loss_param = $root.caffe.LossParameter.decodeText(reader, true); + break; + case "accuracy_param": + message.accuracy_param = $root.caffe.AccuracyParameter.decodeText(reader, true); + break; + case "argmax_param": + message.argmax_param = $root.caffe.ArgMaxParameter.decodeText(reader, true); + break; + case "batch_norm_param": + message.batch_norm_param = 
$root.caffe.BatchNormParameter.decodeText(reader, true); + break; + case "bias_param": + message.bias_param = $root.caffe.BiasParameter.decodeText(reader, true); + break; + case "clip_param": + message.clip_param = $root.caffe.ClipParameter.decodeText(reader, true); + break; + case "concat_param": + message.concat_param = $root.caffe.ConcatParameter.decodeText(reader, true); + break; + case "contrastive_loss_param": + message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decodeText(reader, true); + break; + case "convolution_param": + message.convolution_param = $root.caffe.ConvolutionParameter.decodeText(reader, true); + break; + case "crop_param": + message.crop_param = $root.caffe.CropParameter.decodeText(reader, true); + break; + case "data_param": + message.data_param = $root.caffe.DataParameter.decodeText(reader, true); + break; + case "dropout_param": + message.dropout_param = $root.caffe.DropoutParameter.decodeText(reader, true); + break; + case "dummy_data_param": + message.dummy_data_param = $root.caffe.DummyDataParameter.decodeText(reader, true); + break; + case "eltwise_param": + message.eltwise_param = $root.caffe.EltwiseParameter.decodeText(reader, true); + break; + case "elu_param": + message.elu_param = $root.caffe.ELUParameter.decodeText(reader, true); + break; + case "embed_param": + message.embed_param = $root.caffe.EmbedParameter.decodeText(reader, true); + break; + case "exp_param": + message.exp_param = $root.caffe.ExpParameter.decodeText(reader, true); + break; + case "flatten_param": + message.flatten_param = $root.caffe.FlattenParameter.decodeText(reader, true); + break; + case "hdf5_data_param": + message.hdf5_data_param = $root.caffe.HDF5DataParameter.decodeText(reader, true); + break; + case "hdf5_output_param": + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader, true); + break; + case "hinge_loss_param": + message.hinge_loss_param = $root.caffe.HingeLossParameter.decodeText(reader, true); + 
break; + case "image_data_param": + message.image_data_param = $root.caffe.ImageDataParameter.decodeText(reader, true); + break; + case "infogain_loss_param": + message.infogain_loss_param = $root.caffe.InfogainLossParameter.decodeText(reader, true); + break; + case "inner_product_param": + message.inner_product_param = $root.caffe.InnerProductParameter.decodeText(reader, true); + break; + case "input_param": + message.input_param = $root.caffe.InputParameter.decodeText(reader, true); + break; + case "log_param": + message.log_param = $root.caffe.LogParameter.decodeText(reader, true); + break; + case "lrn_param": + message.lrn_param = $root.caffe.LRNParameter.decodeText(reader, true); + break; + case "memory_data_param": + message.memory_data_param = $root.caffe.MemoryDataParameter.decodeText(reader, true); + break; + case "mvn_param": + message.mvn_param = $root.caffe.MVNParameter.decodeText(reader, true); + break; + case "parameter_param": + message.parameter_param = $root.caffe.ParameterParameter.decodeText(reader, true); + break; + case "pooling_param": + message.pooling_param = $root.caffe.PoolingParameter.decodeText(reader, true); + break; + case "power_param": + message.power_param = $root.caffe.PowerParameter.decodeText(reader, true); + break; + case "prelu_param": + message.prelu_param = $root.caffe.PReLUParameter.decodeText(reader, true); + break; + case "python_param": + message.python_param = $root.caffe.PythonParameter.decodeText(reader, true); + break; + case "recurrent_param": + message.recurrent_param = $root.caffe.RecurrentParameter.decodeText(reader, true); + break; + case "reduction_param": + message.reduction_param = $root.caffe.ReductionParameter.decodeText(reader, true); + break; + case "relu_param": + message.relu_param = $root.caffe.ReLUParameter.decodeText(reader, true); + break; + case "reshape_param": + message.reshape_param = $root.caffe.ReshapeParameter.decodeText(reader, true); + break; + case "scale_param": + message.scale_param = 
$root.caffe.ScaleParameter.decodeText(reader, true); + break; + case "sigmoid_param": + message.sigmoid_param = $root.caffe.SigmoidParameter.decodeText(reader, true); + break; + case "softmax_param": + message.softmax_param = $root.caffe.SoftmaxParameter.decodeText(reader, true); + break; + case "spp_param": + message.spp_param = $root.caffe.SPPParameter.decodeText(reader, true); + break; + case "slice_param": + message.slice_param = $root.caffe.SliceParameter.decodeText(reader, true); + break; + case "swish_param": + message.swish_param = $root.caffe.SwishParameter.decodeText(reader, true); + break; + case "tanh_param": + message.tanh_param = $root.caffe.TanHParameter.decodeText(reader, true); + break; + case "threshold_param": + message.threshold_param = $root.caffe.ThresholdParameter.decodeText(reader, true); + break; + case "tile_param": + message.tile_param = $root.caffe.TileParameter.decodeText(reader, true); + break; + case "window_data_param": + message.window_data_param = $root.caffe.WindowDataParameter.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return LayerParameter; + })(); + + caffe.TransformationParameter = (function() { + + function TransformationParameter(properties) { + this.mean_value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TransformationParameter.prototype.scale = 1; + TransformationParameter.prototype.mirror = false; + TransformationParameter.prototype.crop_size = 0; + TransformationParameter.prototype.mean_file = ""; + TransformationParameter.prototype.mean_value = $util.emptyArray; + TransformationParameter.prototype.force_color = false; + TransformationParameter.prototype.force_gray = false; + + TransformationParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + 
var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.TransformationParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.scale = reader.float(); + break; + case 2: + message.mirror = reader.bool(); + break; + case 3: + message.crop_size = reader.uint32(); + break; + case 4: + message.mean_file = reader.string(); + break; + case 5: + if (!(message.mean_value && message.mean_value.length)) + message.mean_value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.mean_value.push(reader.float()); + } else + message.mean_value.push(reader.float()); + break; + case 6: + message.force_color = reader.bool(); + break; + case 7: + message.force_gray = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TransformationParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.TransformationParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "scale": + message.scale = reader.float(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "mean_value": + if (!(message.mean_value && message.mean_value.length)) + message.mean_value = []; + if (reader.first()) + while (!reader.last()) { + message.mean_value.push(reader.float()); + reader.next(); + } + else + message.mean_value.push(reader.float()); + break; + case "force_color": + message.force_color = reader.bool(); + break; + case "force_gray": + message.force_gray = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TransformationParameter; + })(); + + caffe.LossParameter = (function() { + + function 
LossParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LossParameter.prototype.ignore_label = 0; + LossParameter.prototype.normalization = 1; + LossParameter.prototype.normalize = false; + + LossParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.LossParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ignore_label = reader.int32(); + break; + case 3: + message.normalization = reader.int32(); + break; + case 2: + message.normalize = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + LossParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.LossParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "ignore_label": + message.ignore_label = reader.int32(); + break; + case "normalization": + message.normalization = reader.enum($root.caffe.LossParameter.NormalizationMode); + break; + case "normalize": + message.normalize = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + LossParameter.NormalizationMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "FULL"] = 0; + values[valuesById[1] = "VALID"] = 1; + values[valuesById[2] = "BATCH_SIZE"] = 2; + values[valuesById[3] = "NONE"] = 3; + return values; + })(); + + return LossParameter; + })(); + + caffe.AccuracyParameter = (function() { + + function AccuracyParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + 
this[keys[i]] = properties[keys[i]]; + } + + AccuracyParameter.prototype.top_k = 1; + AccuracyParameter.prototype.axis = 1; + AccuracyParameter.prototype.ignore_label = 0; + + AccuracyParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.AccuracyParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.top_k = reader.uint32(); + break; + case 2: + message.axis = reader.int32(); + break; + case 3: + message.ignore_label = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + AccuracyParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.AccuracyParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "top_k": + message.top_k = reader.uint32(); + break; + case "axis": + message.axis = reader.int32(); + break; + case "ignore_label": + message.ignore_label = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return AccuracyParameter; + })(); + + caffe.ArgMaxParameter = (function() { + + function ArgMaxParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArgMaxParameter.prototype.out_max_val = false; + ArgMaxParameter.prototype.top_k = 1; + ArgMaxParameter.prototype.axis = 0; + + ArgMaxParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ArgMaxParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.out_max_val = reader.bool(); + break; + case 2: + message.top_k = reader.uint32(); + break; + case 3: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ArgMaxParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ArgMaxParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "out_max_val": + message.out_max_val = reader.bool(); + break; + case "top_k": + message.top_k = reader.uint32(); + break; + case "axis": + message.axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ArgMaxParameter; + })(); + + caffe.ClipParameter = (function() { + + function ClipParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ClipParameter.prototype.min = 0; + ClipParameter.prototype.max = 0; + + ClipParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ClipParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.min = reader.float(); + break; + case 2: + message.max = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ClipParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ClipParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "min": + message.min = reader.float(); + break; + case "max": + message.max = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ClipParameter; + })(); + + caffe.ConcatParameter = (function() { + + function ConcatParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ConcatParameter.prototype.axis = 1; + ConcatParameter.prototype.concat_dim = 1; + + ConcatParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ConcatParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.axis = reader.int32(); + break; + case 1: + message.concat_dim = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ConcatParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ConcatParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "concat_dim": + message.concat_dim = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ConcatParameter; + })(); + + caffe.BatchNormParameter = (function() { + + function BatchNormParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BatchNormParameter.prototype.use_global_stats = false; + BatchNormParameter.prototype.moving_average_fraction = 0.999; + BatchNormParameter.prototype.eps = 0.00001; + + BatchNormParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.BatchNormParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.use_global_stats = reader.bool(); + break; + case 2: + message.moving_average_fraction = reader.float(); + break; + case 3: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BatchNormParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.BatchNormParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "use_global_stats": + message.use_global_stats = reader.bool(); + break; + case "moving_average_fraction": + message.moving_average_fraction = reader.float(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BatchNormParameter; + })(); + + caffe.BiasParameter = (function() { + + function BiasParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BiasParameter.prototype.axis = 1; + BiasParameter.prototype.num_axes = 1; + BiasParameter.prototype.filler = null; + + BiasParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.BiasParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.num_axes = reader.int32(); + break; + case 3: + message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BiasParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.BiasParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "num_axes": + message.num_axes = reader.int32(); + break; + case "filler": + message.filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BiasParameter; + })(); + + caffe.ContrastiveLossParameter = (function() { + + function ContrastiveLossParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ContrastiveLossParameter.prototype.margin = 1; + ContrastiveLossParameter.prototype.legacy_version = false; + + ContrastiveLossParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ContrastiveLossParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.margin = reader.float(); + break; + case 2: + message.legacy_version = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ContrastiveLossParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ContrastiveLossParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "margin": + message.margin = reader.float(); + break; + case "legacy_version": + message.legacy_version = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ContrastiveLossParameter; + })(); + + caffe.ConvolutionParameter = (function() { + + function ConvolutionParameter(properties) { + this.pad = []; + this.kernel_size = []; + this.stride = []; + this.dilation = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ConvolutionParameter.prototype.num_output = 0; + ConvolutionParameter.prototype.bias_term = true; + ConvolutionParameter.prototype.pad = $util.emptyArray; + ConvolutionParameter.prototype.kernel_size = $util.emptyArray; + ConvolutionParameter.prototype.stride = $util.emptyArray; + ConvolutionParameter.prototype.dilation = $util.emptyArray; + ConvolutionParameter.prototype.pad_h = 0; + ConvolutionParameter.prototype.pad_w = 0; + ConvolutionParameter.prototype.kernel_h = 0; + ConvolutionParameter.prototype.kernel_w = 0; + ConvolutionParameter.prototype.stride_h = 0; + ConvolutionParameter.prototype.stride_w = 0; + ConvolutionParameter.prototype.group = 1; + ConvolutionParameter.prototype.weight_filler = null; + ConvolutionParameter.prototype.bias_filler = null; + 
ConvolutionParameter.prototype.engine = 0; + ConvolutionParameter.prototype.axis = 1; + ConvolutionParameter.prototype.force_nd_im2col = false; + + ConvolutionParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.ConvolutionParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.bias_term = reader.bool(); + break; + case 3: + if (!(message.pad && message.pad.length)) + message.pad = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.pad.push(reader.uint32()); + } else + message.pad.push(reader.uint32()); + break; + case 4: + if (!(message.kernel_size && message.kernel_size.length)) + message.kernel_size = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.kernel_size.push(reader.uint32()); + } else + message.kernel_size.push(reader.uint32()); + break; + case 6: + if (!(message.stride && message.stride.length)) + message.stride = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.stride.push(reader.uint32()); + } else + message.stride.push(reader.uint32()); + break; + case 18: + if (!(message.dilation && message.dilation.length)) + message.dilation = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dilation.push(reader.uint32()); + } else + message.dilation.push(reader.uint32()); + break; + case 9: + message.pad_h = reader.uint32(); + break; + case 10: + message.pad_w = reader.uint32(); + break; + case 11: + message.kernel_h = reader.uint32(); + break; + case 12: + message.kernel_w = reader.uint32(); + break; + case 13: + message.stride_h = 
reader.uint32(); + break; + case 14: + message.stride_w = reader.uint32(); + break; + case 5: + message.group = reader.uint32(); + break; + case 7: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 8: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 15: + message.engine = reader.int32(); + break; + case 16: + message.axis = reader.int32(); + break; + case 17: + message.force_nd_im2col = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ConvolutionParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ConvolutionParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "pad": + if (!(message.pad && message.pad.length)) + message.pad = []; + if (reader.first()) + while (!reader.last()) { + message.pad.push(reader.uint32()); + reader.next(); + } + else + message.pad.push(reader.uint32()); + break; + case "kernel_size": + if (!(message.kernel_size && message.kernel_size.length)) + message.kernel_size = []; + if (reader.first()) + while (!reader.last()) { + message.kernel_size.push(reader.uint32()); + reader.next(); + } + else + message.kernel_size.push(reader.uint32()); + break; + case "stride": + if (!(message.stride && message.stride.length)) + message.stride = []; + if (reader.first()) + while (!reader.last()) { + message.stride.push(reader.uint32()); + reader.next(); + } + else + message.stride.push(reader.uint32()); + break; + case "dilation": + if (!(message.dilation && message.dilation.length)) + message.dilation = []; + if (reader.first()) + while (!reader.last()) { + message.dilation.push(reader.uint32()); + reader.next(); + } + else + message.dilation.push(reader.uint32()); + break; + case 
"pad_h": + message.pad_h = reader.uint32(); + break; + case "pad_w": + message.pad_w = reader.uint32(); + break; + case "kernel_h": + message.kernel_h = reader.uint32(); + break; + case "kernel_w": + message.kernel_w = reader.uint32(); + break; + case "stride_h": + message.stride_h = reader.uint32(); + break; + case "stride_w": + message.stride_w = reader.uint32(); + break; + case "group": + message.group = reader.uint32(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "engine": + message.engine = reader.enum($root.caffe.ConvolutionParameter.Engine); + break; + case "axis": + message.axis = reader.int32(); + break; + case "force_nd_im2col": + message.force_nd_im2col = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + ConvolutionParameter.Engine = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + return ConvolutionParameter; + })(); + + caffe.CropParameter = (function() { + + function CropParameter(properties) { + this.offset = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CropParameter.prototype.axis = 2; + CropParameter.prototype.offset = $util.emptyArray; + + CropParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.CropParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + if (!(message.offset && message.offset.length)) + message.offset = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.offset.push(reader.uint32()); + } else + message.offset.push(reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + CropParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.CropParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "offset": + if (!(message.offset && message.offset.length)) + message.offset = []; + if (reader.first()) + while (!reader.last()) { + message.offset.push(reader.uint32()); + reader.next(); + } + else + message.offset.push(reader.uint32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return CropParameter; + })(); + + caffe.DataParameter = (function() { + + function DataParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DataParameter.prototype.source = ""; + DataParameter.prototype.batch_size = 0; + DataParameter.prototype.rand_skip = 0; + DataParameter.prototype.backend = 0; + DataParameter.prototype.scale = 1; + DataParameter.prototype.mean_file = ""; + DataParameter.prototype.crop_size = 0; + DataParameter.prototype.mirror = false; + DataParameter.prototype.force_encoded_color = false; + DataParameter.prototype.prefetch = 4; + + DataParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var 
end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.DataParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 4: + message.batch_size = reader.uint32(); + break; + case 7: + message.rand_skip = reader.uint32(); + break; + case 8: + message.backend = reader.int32(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.mean_file = reader.string(); + break; + case 5: + message.crop_size = reader.uint32(); + break; + case 6: + message.mirror = reader.bool(); + break; + case 9: + message.force_encoded_color = reader.bool(); + break; + case 10: + message.prefetch = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DataParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.DataParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "rand_skip": + message.rand_skip = reader.uint32(); + break; + case "backend": + message.backend = reader.enum($root.caffe.DataParameter.DB); + break; + case "scale": + message.scale = reader.float(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "force_encoded_color": + message.force_encoded_color = reader.bool(); + break; + case "prefetch": + message.prefetch = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + DataParameter.DB = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "LEVELDB"] = 0; + values[valuesById[1] = "LMDB"] = 1; + return 
values; + })(); + + return DataParameter; + })(); + + caffe.DropoutParameter = (function() { + + function DropoutParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DropoutParameter.prototype.dropout_ratio = 0.5; + + DropoutParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.DropoutParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dropout_ratio = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DropoutParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.DropoutParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dropout_ratio": + message.dropout_ratio = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return DropoutParameter; + })(); + + caffe.DummyDataParameter = (function() { + + function DummyDataParameter(properties) { + this.data_filler = []; + this.shape = []; + this.num = []; + this.channels = []; + this.height = []; + this.width = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DummyDataParameter.prototype.data_filler = $util.emptyArray; + DummyDataParameter.prototype.shape = $util.emptyArray; + DummyDataParameter.prototype.num = $util.emptyArray; + DummyDataParameter.prototype.channels = $util.emptyArray; + DummyDataParameter.prototype.height = $util.emptyArray; + DummyDataParameter.prototype.width = $util.emptyArray; + + DummyDataParameter.decode = function 
decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.DummyDataParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.data_filler && message.data_filler.length)) + message.data_filler = []; + message.data_filler.push($root.caffe.FillerParameter.decode(reader, reader.uint32())); + break; + case 6: + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.caffe.BlobShape.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.num && message.num.length)) + message.num = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.num.push(reader.uint32()); + } else + message.num.push(reader.uint32()); + break; + case 3: + if (!(message.channels && message.channels.length)) + message.channels = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.channels.push(reader.uint32()); + } else + message.channels.push(reader.uint32()); + break; + case 4: + if (!(message.height && message.height.length)) + message.height = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.height.push(reader.uint32()); + } else + message.height.push(reader.uint32()); + break; + case 5: + if (!(message.width && message.width.length)) + message.width = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.width.push(reader.uint32()); + } else + message.width.push(reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DummyDataParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.DummyDataParameter(); + reader.start(); + while (!reader.end()) { + 
var tag = reader.tag(); + switch (tag) { + case "data_filler": + if (!(message.data_filler && message.data_filler.length)) + message.data_filler = []; + message.data_filler.push($root.caffe.FillerParameter.decodeText(reader, true)); + break; + case "shape": + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.caffe.BlobShape.decodeText(reader, true)); + break; + case "num": + if (!(message.num && message.num.length)) + message.num = []; + if (reader.first()) + while (!reader.last()) { + message.num.push(reader.uint32()); + reader.next(); + } + else + message.num.push(reader.uint32()); + break; + case "channels": + if (!(message.channels && message.channels.length)) + message.channels = []; + if (reader.first()) + while (!reader.last()) { + message.channels.push(reader.uint32()); + reader.next(); + } + else + message.channels.push(reader.uint32()); + break; + case "height": + if (!(message.height && message.height.length)) + message.height = []; + if (reader.first()) + while (!reader.last()) { + message.height.push(reader.uint32()); + reader.next(); + } + else + message.height.push(reader.uint32()); + break; + case "width": + if (!(message.width && message.width.length)) + message.width = []; + if (reader.first()) + while (!reader.last()) { + message.width.push(reader.uint32()); + reader.next(); + } + else + message.width.push(reader.uint32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return DummyDataParameter; + })(); + + caffe.EltwiseParameter = (function() { + + function EltwiseParameter(properties) { + this.coeff = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EltwiseParameter.prototype.operation = 1; + EltwiseParameter.prototype.coeff = $util.emptyArray; + EltwiseParameter.prototype.stable_prod_grad = true; + + EltwiseParameter.decode = 
function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.EltwiseParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operation = reader.int32(); + break; + case 2: + if (!(message.coeff && message.coeff.length)) + message.coeff = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.coeff.push(reader.float()); + } else + message.coeff.push(reader.float()); + break; + case 3: + message.stable_prod_grad = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + EltwiseParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.EltwiseParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "operation": + message.operation = reader.enum($root.caffe.EltwiseParameter.EltwiseOp); + break; + case "coeff": + if (!(message.coeff && message.coeff.length)) + message.coeff = []; + if (reader.first()) + while (!reader.last()) { + message.coeff.push(reader.float()); + reader.next(); + } + else + message.coeff.push(reader.float()); + break; + case "stable_prod_grad": + message.stable_prod_grad = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + EltwiseParameter.EltwiseOp = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "PROD"] = 0; + values[valuesById[1] = "SUM"] = 1; + values[valuesById[2] = "MAX"] = 2; + return values; + })(); + + return EltwiseParameter; + })(); + + caffe.ELUParameter = (function() { + + function ELUParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = 
properties[keys[i]]; + } + + ELUParameter.prototype.alpha = 1; + + ELUParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.ELUParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ELUParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ELUParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "alpha": + message.alpha = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ELUParameter; + })(); + + caffe.EmbedParameter = (function() { + + function EmbedParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EmbedParameter.prototype.num_output = 0; + EmbedParameter.prototype.input_dim = 0; + EmbedParameter.prototype.bias_term = true; + EmbedParameter.prototype.weight_filler = null; + EmbedParameter.prototype.bias_filler = null; + + EmbedParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.EmbedParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.input_dim = reader.uint32(); + break; + case 3: + message.bias_term = reader.bool(); + break; + case 4: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 5: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + EmbedParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.EmbedParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "input_dim": + message.input_dim = reader.uint32(); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return EmbedParameter; + })(); + + caffe.ExpParameter = (function() { + + function ExpParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ExpParameter.prototype.base = -1; + ExpParameter.prototype.scale = 1; + ExpParameter.prototype.shift = 0; + + ExpParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ExpParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base = reader.float(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.shift = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ExpParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ExpParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "base": + message.base = reader.float(); + break; + case "scale": + message.scale = reader.float(); + break; + case "shift": + message.shift = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ExpParameter; + })(); + + caffe.FlattenParameter = (function() { + + function FlattenParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FlattenParameter.prototype.axis = 1; + FlattenParameter.prototype.end_axis = -1; + + FlattenParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.FlattenParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.end_axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FlattenParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.FlattenParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "end_axis": + message.end_axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return FlattenParameter; + })(); + + caffe.HDF5DataParameter = (function() { + + function HDF5DataParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + HDF5DataParameter.prototype.source = ""; + HDF5DataParameter.prototype.batch_size = 0; + HDF5DataParameter.prototype.shuffle = false; + + HDF5DataParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.HDF5DataParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 2: + message.batch_size = reader.uint32(); + break; + case 3: + message.shuffle = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + HDF5DataParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.HDF5DataParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "shuffle": + message.shuffle = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return HDF5DataParameter; + })(); + + caffe.HDF5OutputParameter = (function() { + + function HDF5OutputParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + HDF5OutputParameter.prototype.file_name = ""; + + HDF5OutputParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.HDF5OutputParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.file_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + HDF5OutputParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.HDF5OutputParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "file_name": + message.file_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return HDF5OutputParameter; + })(); + + caffe.HingeLossParameter = (function() { + + function HingeLossParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + HingeLossParameter.prototype.norm = 1; + + HingeLossParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.HingeLossParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.norm = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + HingeLossParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.HingeLossParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "norm": + message.norm = reader.enum($root.caffe.HingeLossParameter.Norm); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + HingeLossParameter.Norm = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[1] = "L1"] = 1; + values[valuesById[2] = "L2"] = 2; + return values; + })(); + + return HingeLossParameter; + })(); + + caffe.ImageDataParameter = (function() { + + function ImageDataParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ImageDataParameter.prototype.source = ""; + ImageDataParameter.prototype.batch_size = 1; + ImageDataParameter.prototype.rand_skip = 0; + ImageDataParameter.prototype.shuffle = false; + ImageDataParameter.prototype.new_height = 0; + ImageDataParameter.prototype.new_width = 0; + ImageDataParameter.prototype.is_color = true; + ImageDataParameter.prototype.scale = 1; + ImageDataParameter.prototype.mean_file = ""; + ImageDataParameter.prototype.crop_size = 0; + ImageDataParameter.prototype.mirror = false; + ImageDataParameter.prototype.root_folder = ""; + + ImageDataParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ImageDataParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 4: + message.batch_size = reader.uint32(); + break; + case 7: + message.rand_skip = reader.uint32(); + break; + case 8: + message.shuffle = reader.bool(); + break; + case 9: + message.new_height = reader.uint32(); + break; + case 10: + message.new_width = reader.uint32(); + break; + case 11: + message.is_color = reader.bool(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.mean_file = reader.string(); + break; + case 5: + message.crop_size = reader.uint32(); + break; + case 6: + message.mirror = reader.bool(); + break; + case 12: + message.root_folder = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ImageDataParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ImageDataParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "rand_skip": + message.rand_skip = reader.uint32(); + break; + case "shuffle": + message.shuffle = reader.bool(); + break; + case "new_height": + message.new_height = reader.uint32(); + break; + case "new_width": + message.new_width = reader.uint32(); + break; + case "is_color": + message.is_color = reader.bool(); + break; + case "scale": + message.scale = reader.float(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "root_folder": + message.root_folder = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; 
+ }; + + return ImageDataParameter; + })(); + + caffe.InfogainLossParameter = (function() { + + function InfogainLossParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + InfogainLossParameter.prototype.source = ""; + InfogainLossParameter.prototype.axis = 1; + + InfogainLossParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.InfogainLossParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 2: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + InfogainLossParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.InfogainLossParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "axis": + message.axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return InfogainLossParameter; + })(); + + caffe.InnerProductParameter = (function() { + + function InnerProductParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + InnerProductParameter.prototype.num_output = 0; + InnerProductParameter.prototype.bias_term = true; + InnerProductParameter.prototype.weight_filler = null; + InnerProductParameter.prototype.bias_filler = null; + InnerProductParameter.prototype.axis = 1; + InnerProductParameter.prototype.transpose = false; + + InnerProductParameter.decode = 
function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.InnerProductParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.bias_term = reader.bool(); + break; + case 3: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 4: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 5: + message.axis = reader.int32(); + break; + case 6: + message.transpose = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + InnerProductParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.InnerProductParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "axis": + message.axis = reader.int32(); + break; + case "transpose": + message.transpose = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return InnerProductParameter; + })(); + + caffe.InputParameter = (function() { + + function InputParameter(properties) { + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + InputParameter.prototype.shape = $util.emptyArray; + + InputParameter.decode = function 
decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.InputParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.caffe.BlobShape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + InputParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.InputParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "shape": + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.caffe.BlobShape.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return InputParameter; + })(); + + caffe.LogParameter = (function() { + + function LogParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LogParameter.prototype.base = -1; + LogParameter.prototype.scale = 1; + LogParameter.prototype.shift = 0; + + LogParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.LogParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.base = reader.float(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.shift = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + LogParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.LogParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "base": + message.base = reader.float(); + break; + case "scale": + message.scale = reader.float(); + break; + case "shift": + message.shift = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return LogParameter; + })(); + + caffe.LRNParameter = (function() { + + function LRNParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LRNParameter.prototype.local_size = 5; + LRNParameter.prototype.alpha = 1; + LRNParameter.prototype.beta = 0.75; + LRNParameter.prototype.norm_region = 0; + LRNParameter.prototype.k = 1; + LRNParameter.prototype.engine = 0; + + LRNParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.LRNParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.local_size = reader.uint32(); + break; + case 2: + message.alpha = reader.float(); + break; + case 3: + message.beta = reader.float(); + break; + case 4: + message.norm_region = reader.int32(); + break; + case 5: + message.k = reader.float(); + break; + case 6: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + LRNParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.LRNParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "local_size": + message.local_size = reader.uint32(); + break; + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + case "norm_region": + message.norm_region = reader.enum($root.caffe.LRNParameter.NormRegion); + break; + case "k": + message.k = reader.float(); + break; + case "engine": + message.engine = reader.enum($root.caffe.LRNParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + LRNParameter.NormRegion = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "ACROSS_CHANNELS"] = 0; + values[valuesById[1] = "WITHIN_CHANNEL"] = 1; + return values; + })(); + + LRNParameter.Engine = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + return LRNParameter; + })(); + + caffe.MemoryDataParameter = (function() { + + function MemoryDataParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + 
this[keys[i]] = properties[keys[i]]; + } + + MemoryDataParameter.prototype.batch_size = 0; + MemoryDataParameter.prototype.channels = 0; + MemoryDataParameter.prototype.height = 0; + MemoryDataParameter.prototype.width = 0; + + MemoryDataParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.MemoryDataParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.batch_size = reader.uint32(); + break; + case 2: + message.channels = reader.uint32(); + break; + case 3: + message.height = reader.uint32(); + break; + case 4: + message.width = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + MemoryDataParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.MemoryDataParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "channels": + message.channels = reader.uint32(); + break; + case "height": + message.height = reader.uint32(); + break; + case "width": + message.width = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return MemoryDataParameter; + })(); + + caffe.MVNParameter = (function() { + + function MVNParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MVNParameter.prototype.normalize_variance = true; + MVNParameter.prototype.across_channels = false; + MVNParameter.prototype.eps = 1e-9; + + MVNParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === 
undefined ? reader.len : reader.pos + length, message = new $root.caffe.MVNParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.normalize_variance = reader.bool(); + break; + case 2: + message.across_channels = reader.bool(); + break; + case 3: + message.eps = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + MVNParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.MVNParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "normalize_variance": + message.normalize_variance = reader.bool(); + break; + case "across_channels": + message.across_channels = reader.bool(); + break; + case "eps": + message.eps = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return MVNParameter; + })(); + + caffe.ParameterParameter = (function() { + + function ParameterParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ParameterParameter.prototype.shape = null; + + ParameterParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ParameterParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ParameterParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ParameterParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe.BlobShape.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ParameterParameter; + })(); + + caffe.PoolingParameter = (function() { + + function PoolingParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PoolingParameter.prototype.pool = 0; + PoolingParameter.prototype.pad = 0; + PoolingParameter.prototype.pad_h = 0; + PoolingParameter.prototype.pad_w = 0; + PoolingParameter.prototype.kernel_size = 0; + PoolingParameter.prototype.kernel_h = 0; + PoolingParameter.prototype.kernel_w = 0; + PoolingParameter.prototype.stride = 1; + PoolingParameter.prototype.stride_h = 0; + PoolingParameter.prototype.stride_w = 0; + PoolingParameter.prototype.engine = 0; + PoolingParameter.prototype.global_pooling = false; + PoolingParameter.prototype.round_mode = 0; + + PoolingParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.PoolingParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pool = reader.int32(); + break; + case 4: + message.pad = reader.uint32(); + break; + case 9: + message.pad_h = reader.uint32(); + break; + case 10: + message.pad_w = reader.uint32(); + break; + case 2: + message.kernel_size = reader.uint32(); + break; + case 5: + message.kernel_h = reader.uint32(); + break; + case 6: + message.kernel_w = reader.uint32(); + break; + case 3: + message.stride = reader.uint32(); + break; + case 7: + message.stride_h = reader.uint32(); + break; + case 8: + message.stride_w = reader.uint32(); + break; + case 11: + message.engine = reader.int32(); + break; + case 12: + message.global_pooling = reader.bool(); + break; + case 13: + message.round_mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PoolingParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.PoolingParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "pool": + message.pool = reader.enum($root.caffe.PoolingParameter.PoolMethod); + break; + case "pad": + message.pad = reader.uint32(); + break; + case "pad_h": + message.pad_h = reader.uint32(); + break; + case "pad_w": + message.pad_w = reader.uint32(); + break; + case "kernel_size": + message.kernel_size = reader.uint32(); + break; + case "kernel_h": + message.kernel_h = reader.uint32(); + break; + case "kernel_w": + message.kernel_w = reader.uint32(); + break; + case "stride": + message.stride = reader.uint32(); + break; + case "stride_h": + message.stride_h = reader.uint32(); + break; + case "stride_w": + message.stride_w = reader.uint32(); + break; + case "engine": + message.engine = reader.enum($root.caffe.PoolingParameter.Engine); + break; + case "global_pooling": + message.global_pooling = 
reader.bool(); + break; + case "round_mode": + message.round_mode = reader.enum($root.caffe.PoolingParameter.RoundMode); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + PoolingParameter.PoolMethod = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "MAX"] = 0; + values[valuesById[1] = "AVE"] = 1; + values[valuesById[2] = "STOCHASTIC"] = 2; + return values; + })(); + + PoolingParameter.Engine = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + PoolingParameter.RoundMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CEIL"] = 0; + values[valuesById[1] = "FLOOR"] = 1; + return values; + })(); + + return PoolingParameter; + })(); + + caffe.PowerParameter = (function() { + + function PowerParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PowerParameter.prototype.power = 1; + PowerParameter.prototype.scale = 1; + PowerParameter.prototype.shift = 0; + + PowerParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.PowerParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.power = reader.float(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.shift = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PowerParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.PowerParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "power": + message.power = reader.float(); + break; + case "scale": + message.scale = reader.float(); + break; + case "shift": + message.shift = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return PowerParameter; + })(); + + caffe.PythonParameter = (function() { + + function PythonParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PythonParameter.prototype.module = ""; + PythonParameter.prototype.layer = ""; + PythonParameter.prototype.param_str = ""; + PythonParameter.prototype.share_in_parallel = false; + + PythonParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.PythonParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.module = reader.string(); + break; + case 2: + message.layer = reader.string(); + break; + case 3: + message.param_str = reader.string(); + break; + case 4: + message.share_in_parallel = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PythonParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.PythonParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "module": + message.module = reader.string(); + break; + case "layer": + message.layer = reader.string(); + break; + case "param_str": + message.param_str = reader.string(); + break; + case "share_in_parallel": + message.share_in_parallel = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return PythonParameter; + })(); + + caffe.RecurrentParameter = (function() { + + function RecurrentParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RecurrentParameter.prototype.num_output = 0; + RecurrentParameter.prototype.weight_filler = null; + RecurrentParameter.prototype.bias_filler = null; + RecurrentParameter.prototype.debug_info = false; + RecurrentParameter.prototype.expose_hidden = false; + + RecurrentParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.RecurrentParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_output = reader.uint32(); + break; + case 2: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 3: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 4: + message.debug_info = reader.bool(); + break; + case 5: + message.expose_hidden = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + RecurrentParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.RecurrentParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "num_output": + message.num_output = reader.uint32(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "debug_info": + message.debug_info = reader.bool(); + break; + case "expose_hidden": + message.expose_hidden = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return RecurrentParameter; + })(); + + caffe.ReductionParameter = (function() { + + function ReductionParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReductionParameter.prototype.operation = 1; + ReductionParameter.prototype.axis = 0; + ReductionParameter.prototype.coeff = 1; + + ReductionParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ReductionParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operation = reader.int32(); + break; + case 2: + message.axis = reader.int32(); + break; + case 3: + message.coeff = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ReductionParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ReductionParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "operation": + message.operation = reader.enum($root.caffe.ReductionParameter.ReductionOp); + break; + case "axis": + message.axis = reader.int32(); + break; + case "coeff": + message.coeff = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + ReductionParameter.ReductionOp = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[1] = "SUM"] = 1; + values[valuesById[2] = "ASUM"] = 2; + values[valuesById[3] = "SUMSQ"] = 3; + values[valuesById[4] = "MEAN"] = 4; + return values; + })(); + + return ReductionParameter; + })(); + + caffe.ReLUParameter = (function() { + + function ReLUParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReLUParameter.prototype.negative_slope = 0; + ReLUParameter.prototype.engine = 0; + + ReLUParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ReLUParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.negative_slope = reader.float(); + break; + case 2: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ReLUParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ReLUParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "negative_slope": + message.negative_slope = reader.float(); + break; + case "engine": + message.engine = reader.enum($root.caffe.ReLUParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + ReLUParameter.Engine = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + return ReLUParameter; + })(); + + caffe.ReshapeParameter = (function() { + + function ReshapeParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReshapeParameter.prototype.shape = null; + ReshapeParameter.prototype.axis = 0; + ReshapeParameter.prototype.num_axes = -1; + + ReshapeParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ReshapeParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32()); + break; + case 2: + message.axis = reader.int32(); + break; + case 3: + message.num_axes = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ReshapeParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ReshapeParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe.BlobShape.decodeText(reader, true); + break; + case "axis": + message.axis = reader.int32(); + break; + case "num_axes": + message.num_axes = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ReshapeParameter; + })(); + + caffe.ScaleParameter = (function() { + + function ScaleParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ScaleParameter.prototype.axis = 1; + ScaleParameter.prototype.num_axes = 1; + ScaleParameter.prototype.filler = null; + ScaleParameter.prototype.bias_term = false; + ScaleParameter.prototype.bias_filler = null; + + ScaleParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ScaleParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.num_axes = reader.int32(); + break; + case 3: + message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 4: + message.bias_term = reader.bool(); + break; + case 5: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ScaleParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ScaleParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "num_axes": + message.num_axes = reader.int32(); + break; + case "filler": + message.filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "bias_term": + message.bias_term = reader.bool(); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ScaleParameter; + })(); + + caffe.SigmoidParameter = (function() { + + function SigmoidParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SigmoidParameter.prototype.engine = 0; + + SigmoidParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.SigmoidParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SigmoidParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.SigmoidParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "engine": + message.engine = reader.enum($root.caffe.SigmoidParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + SigmoidParameter.Engine = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + return SigmoidParameter; + })(); + + caffe.SliceParameter = (function() { + + function SliceParameter(properties) { + this.slice_point = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SliceParameter.prototype.axis = 1; + SliceParameter.prototype.slice_point = $util.emptyArray; + SliceParameter.prototype.slice_dim = 1; + + SliceParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.SliceParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.axis = reader.int32(); + break; + case 2: + if (!(message.slice_point && message.slice_point.length)) + message.slice_point = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.slice_point.push(reader.uint32()); + } else + message.slice_point.push(reader.uint32()); + break; + case 1: + message.slice_dim = reader.uint32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SliceParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.SliceParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "slice_point": + if (!(message.slice_point && message.slice_point.length)) + message.slice_point = []; + if (reader.first()) + while (!reader.last()) { + message.slice_point.push(reader.uint32()); + reader.next(); + } + else + message.slice_point.push(reader.uint32()); + break; + case "slice_dim": + message.slice_dim = reader.uint32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SliceParameter; + })(); + + caffe.SoftmaxParameter = (function() { + + function SoftmaxParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SoftmaxParameter.prototype.engine = 0; + SoftmaxParameter.prototype.axis = 1; + + SoftmaxParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.SoftmaxParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engine = reader.int32(); + break; + case 2: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SoftmaxParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.SoftmaxParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "engine": + message.engine = reader.enum($root.caffe.SoftmaxParameter.Engine); + break; + case "axis": + message.axis = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + SoftmaxParameter.Engine = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + return SoftmaxParameter; + })(); + + caffe.SwishParameter = (function() { + + function SwishParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SwishParameter.prototype.beta = 1; + + SwishParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.SwishParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SwishParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.SwishParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "beta": + message.beta = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SwishParameter; + })(); + + caffe.TanHParameter = (function() { + + function TanHParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TanHParameter.prototype.engine = 0; + + TanHParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.TanHParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TanHParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.TanHParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "engine": + message.engine = reader.enum($root.caffe.TanHParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TanHParameter.Engine = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + return TanHParameter; + })(); + + caffe.TileParameter = (function() { + + function TileParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TileParameter.prototype.axis = 1; + TileParameter.prototype.tiles = 0; + + TileParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.TileParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int32(); + break; + case 2: + message.tiles = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TileParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.TileParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "axis": + message.axis = reader.int32(); + break; + case "tiles": + message.tiles = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TileParameter; + })(); + + caffe.ThresholdParameter = (function() { + + function ThresholdParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ThresholdParameter.prototype.threshold = 0; + + ThresholdParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.ThresholdParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.threshold = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ThresholdParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.ThresholdParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "threshold": + message.threshold = reader.float(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ThresholdParameter; + })(); + + caffe.WindowDataParameter = (function() { + + function WindowDataParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + WindowDataParameter.prototype.source = ""; + WindowDataParameter.prototype.scale = 1; + WindowDataParameter.prototype.mean_file = ""; + WindowDataParameter.prototype.batch_size = 0; + WindowDataParameter.prototype.crop_size = 0; + WindowDataParameter.prototype.mirror = false; + WindowDataParameter.prototype.fg_threshold = 0.5; + WindowDataParameter.prototype.bg_threshold = 0.5; + WindowDataParameter.prototype.fg_fraction = 0.25; + WindowDataParameter.prototype.context_pad = 0; + WindowDataParameter.prototype.crop_mode = "warp"; + WindowDataParameter.prototype.cache_images = false; + WindowDataParameter.prototype.root_folder = ""; + + WindowDataParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.WindowDataParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source = reader.string(); + break; + case 2: + message.scale = reader.float(); + break; + case 3: + message.mean_file = reader.string(); + break; + case 4: + message.batch_size = reader.uint32(); + break; + case 5: + message.crop_size = reader.uint32(); + break; + case 6: + message.mirror = reader.bool(); + break; + case 7: + message.fg_threshold = reader.float(); + break; + case 8: + message.bg_threshold = reader.float(); + break; + case 9: + message.fg_fraction = reader.float(); + break; + case 10: + message.context_pad = reader.uint32(); + break; + case 11: + message.crop_mode = reader.string(); + break; + case 12: + message.cache_images = reader.bool(); + break; + case 13: + message.root_folder = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + WindowDataParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.WindowDataParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "source": + message.source = reader.string(); + break; + case "scale": + message.scale = reader.float(); + break; + case "mean_file": + message.mean_file = reader.string(); + break; + case "batch_size": + message.batch_size = reader.uint32(); + break; + case "crop_size": + message.crop_size = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "fg_threshold": + message.fg_threshold = reader.float(); + break; + case "bg_threshold": + message.bg_threshold = reader.float(); + break; + case "fg_fraction": + message.fg_fraction = reader.float(); + break; + case "context_pad": + message.context_pad = reader.uint32(); + break; + case "crop_mode": + message.crop_mode = reader.string(); + break; + case "cache_images": + message.cache_images = 
reader.bool(); + break; + case "root_folder": + message.root_folder = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return WindowDataParameter; + })(); + + caffe.SPPParameter = (function() { + + function SPPParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SPPParameter.prototype.pyramid_height = 0; + SPPParameter.prototype.pool = 0; + SPPParameter.prototype.engine = 0; + + SPPParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.SPPParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pyramid_height = reader.uint32(); + break; + case 2: + message.pool = reader.int32(); + break; + case 6: + message.engine = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SPPParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.SPPParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "pyramid_height": + message.pyramid_height = reader.uint32(); + break; + case "pool": + message.pool = reader.enum($root.caffe.SPPParameter.PoolMethod); + break; + case "engine": + message.engine = reader.enum($root.caffe.SPPParameter.Engine); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + SPPParameter.PoolMethod = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "MAX"] = 0; + values[valuesById[1] = "AVE"] = 1; + values[valuesById[2] = "STOCHASTIC"] = 2; + return values; + })(); + + SPPParameter.Engine = (function() { + var valuesById = {}, 
values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "CAFFE"] = 1; + values[valuesById[2] = "CUDNN"] = 2; + return values; + })(); + + return SPPParameter; + })(); + + caffe.V1LayerParameter = (function() { + + function V1LayerParameter(properties) { + this.bottom = []; + this.top = []; + this.include = []; + this.exclude = []; + this.blobs = []; + this.param = []; + this.blob_share_mode = []; + this.blobs_lr = []; + this.weight_decay = []; + this.loss_weight = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + V1LayerParameter.prototype.bottom = $util.emptyArray; + V1LayerParameter.prototype.top = $util.emptyArray; + V1LayerParameter.prototype.name = ""; + V1LayerParameter.prototype.include = $util.emptyArray; + V1LayerParameter.prototype.exclude = $util.emptyArray; + V1LayerParameter.prototype.type = 0; + V1LayerParameter.prototype.blobs = $util.emptyArray; + V1LayerParameter.prototype.param = $util.emptyArray; + V1LayerParameter.prototype.blob_share_mode = $util.emptyArray; + V1LayerParameter.prototype.blobs_lr = $util.emptyArray; + V1LayerParameter.prototype.weight_decay = $util.emptyArray; + V1LayerParameter.prototype.loss_weight = $util.emptyArray; + V1LayerParameter.prototype.accuracy_param = null; + V1LayerParameter.prototype.argmax_param = null; + V1LayerParameter.prototype.concat_param = null; + V1LayerParameter.prototype.contrastive_loss_param = null; + V1LayerParameter.prototype.convolution_param = null; + V1LayerParameter.prototype.data_param = null; + V1LayerParameter.prototype.dropout_param = null; + V1LayerParameter.prototype.dummy_data_param = null; + V1LayerParameter.prototype.eltwise_param = null; + V1LayerParameter.prototype.exp_param = null; + V1LayerParameter.prototype.hdf5_data_param = null; + V1LayerParameter.prototype.hdf5_output_param = null; + 
V1LayerParameter.prototype.hinge_loss_param = null; + V1LayerParameter.prototype.image_data_param = null; + V1LayerParameter.prototype.infogain_loss_param = null; + V1LayerParameter.prototype.inner_product_param = null; + V1LayerParameter.prototype.lrn_param = null; + V1LayerParameter.prototype.memory_data_param = null; + V1LayerParameter.prototype.mvn_param = null; + V1LayerParameter.prototype.pooling_param = null; + V1LayerParameter.prototype.power_param = null; + V1LayerParameter.prototype.relu_param = null; + V1LayerParameter.prototype.sigmoid_param = null; + V1LayerParameter.prototype.softmax_param = null; + V1LayerParameter.prototype.slice_param = null; + V1LayerParameter.prototype.tanh_param = null; + V1LayerParameter.prototype.threshold_param = null; + V1LayerParameter.prototype.window_data_param = null; + V1LayerParameter.prototype.transform_param = null; + V1LayerParameter.prototype.loss_param = null; + V1LayerParameter.prototype.layer = null; + + V1LayerParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe.V1LayerParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + if (!(message.bottom && message.bottom.length)) + message.bottom = []; + message.bottom.push(reader.string()); + break; + case 3: + if (!(message.top && message.top.length)) + message.top = []; + message.top.push(reader.string()); + break; + case 4: + message.name = reader.string(); + break; + case 32: + if (!(message.include && message.include.length)) + message.include = []; + message.include.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 33: + if (!(message.exclude && message.exclude.length)) + message.exclude = []; + message.exclude.push($root.caffe.NetStateRule.decode(reader, reader.uint32())); + break; + case 5: + message.type = reader.int32(); + break; + case 6: + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 1001: + if (!(message.param && message.param.length)) + message.param = []; + message.param.push(reader.string()); + break; + case 1002: + if (!(message.blob_share_mode && message.blob_share_mode.length)) + message.blob_share_mode = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.blob_share_mode.push(reader.int32()); + } else + message.blob_share_mode.push(reader.int32()); + break; + case 7: + if (!(message.blobs_lr && message.blobs_lr.length)) + message.blobs_lr = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.blobs_lr.push(reader.float()); + } else + message.blobs_lr.push(reader.float()); + break; + case 8: + if (!(message.weight_decay && message.weight_decay.length)) + message.weight_decay = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + 
message.weight_decay.push(reader.float()); + } else + message.weight_decay.push(reader.float()); + break; + case 35: + if (!(message.loss_weight && message.loss_weight.length)) + message.loss_weight = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.loss_weight.push(reader.float()); + } else + message.loss_weight.push(reader.float()); + break; + case 27: + message.accuracy_param = $root.caffe.AccuracyParameter.decode(reader, reader.uint32()); + break; + case 23: + message.argmax_param = $root.caffe.ArgMaxParameter.decode(reader, reader.uint32()); + break; + case 9: + message.concat_param = $root.caffe.ConcatParameter.decode(reader, reader.uint32()); + break; + case 40: + message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decode(reader, reader.uint32()); + break; + case 10: + message.convolution_param = $root.caffe.ConvolutionParameter.decode(reader, reader.uint32()); + break; + case 11: + message.data_param = $root.caffe.DataParameter.decode(reader, reader.uint32()); + break; + case 12: + message.dropout_param = $root.caffe.DropoutParameter.decode(reader, reader.uint32()); + break; + case 26: + message.dummy_data_param = $root.caffe.DummyDataParameter.decode(reader, reader.uint32()); + break; + case 24: + message.eltwise_param = $root.caffe.EltwiseParameter.decode(reader, reader.uint32()); + break; + case 41: + message.exp_param = $root.caffe.ExpParameter.decode(reader, reader.uint32()); + break; + case 13: + message.hdf5_data_param = $root.caffe.HDF5DataParameter.decode(reader, reader.uint32()); + break; + case 14: + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32()); + break; + case 29: + message.hinge_loss_param = $root.caffe.HingeLossParameter.decode(reader, reader.uint32()); + break; + case 15: + message.image_data_param = $root.caffe.ImageDataParameter.decode(reader, reader.uint32()); + break; + case 16: + message.infogain_loss_param = 
$root.caffe.InfogainLossParameter.decode(reader, reader.uint32()); + break; + case 17: + message.inner_product_param = $root.caffe.InnerProductParameter.decode(reader, reader.uint32()); + break; + case 18: + message.lrn_param = $root.caffe.LRNParameter.decode(reader, reader.uint32()); + break; + case 22: + message.memory_data_param = $root.caffe.MemoryDataParameter.decode(reader, reader.uint32()); + break; + case 34: + message.mvn_param = $root.caffe.MVNParameter.decode(reader, reader.uint32()); + break; + case 19: + message.pooling_param = $root.caffe.PoolingParameter.decode(reader, reader.uint32()); + break; + case 21: + message.power_param = $root.caffe.PowerParameter.decode(reader, reader.uint32()); + break; + case 30: + message.relu_param = $root.caffe.ReLUParameter.decode(reader, reader.uint32()); + break; + case 38: + message.sigmoid_param = $root.caffe.SigmoidParameter.decode(reader, reader.uint32()); + break; + case 39: + message.softmax_param = $root.caffe.SoftmaxParameter.decode(reader, reader.uint32()); + break; + case 31: + message.slice_param = $root.caffe.SliceParameter.decode(reader, reader.uint32()); + break; + case 37: + message.tanh_param = $root.caffe.TanHParameter.decode(reader, reader.uint32()); + break; + case 25: + message.threshold_param = $root.caffe.ThresholdParameter.decode(reader, reader.uint32()); + break; + case 20: + message.window_data_param = $root.caffe.WindowDataParameter.decode(reader, reader.uint32()); + break; + case 36: + message.transform_param = $root.caffe.TransformationParameter.decode(reader, reader.uint32()); + break; + case 42: + message.loss_param = $root.caffe.LossParameter.decode(reader, reader.uint32()); + break; + case 1: + message.layer = $root.caffe.V0LayerParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + V1LayerParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.V1LayerParameter(); + 
reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "bottom": + if (!(message.bottom && message.bottom.length)) + message.bottom = []; + if (reader.first()) + while (!reader.last()) { + message.bottom.push(reader.string()); + reader.next(); + } + else + message.bottom.push(reader.string()); + break; + case "top": + if (!(message.top && message.top.length)) + message.top = []; + if (reader.first()) + while (!reader.last()) { + message.top.push(reader.string()); + reader.next(); + } + else + message.top.push(reader.string()); + break; + case "name": + message.name = reader.string(); + break; + case "include": + if (!(message.include && message.include.length)) + message.include = []; + message.include.push($root.caffe.NetStateRule.decodeText(reader, true)); + break; + case "exclude": + if (!(message.exclude && message.exclude.length)) + message.exclude = []; + message.exclude.push($root.caffe.NetStateRule.decodeText(reader, true)); + break; + case "type": + message.type = reader.enum($root.caffe.V1LayerParameter.LayerType); + break; + case "blobs": + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decodeText(reader, true)); + break; + case "param": + if (!(message.param && message.param.length)) + message.param = []; + if (reader.first()) + while (!reader.last()) { + message.param.push(reader.string()); + reader.next(); + } + else + message.param.push(reader.string()); + break; + case "blob_share_mode": + if (!(message.blob_share_mode && message.blob_share_mode.length)) + message.blob_share_mode = []; + if (reader.first()) + while (!reader.last()) { + message.blob_share_mode.push(reader.enum($root.caffe.V1LayerParameter.DimCheckMode)); + reader.next(); + } + else + message.blob_share_mode.push(reader.enum($root.caffe.V1LayerParameter.DimCheckMode)); + break; + case "blobs_lr": + if (!(message.blobs_lr && message.blobs_lr.length)) + message.blobs_lr = []; + if 
(reader.first()) + while (!reader.last()) { + message.blobs_lr.push(reader.float()); + reader.next(); + } + else + message.blobs_lr.push(reader.float()); + break; + case "weight_decay": + if (!(message.weight_decay && message.weight_decay.length)) + message.weight_decay = []; + if (reader.first()) + while (!reader.last()) { + message.weight_decay.push(reader.float()); + reader.next(); + } + else + message.weight_decay.push(reader.float()); + break; + case "loss_weight": + if (!(message.loss_weight && message.loss_weight.length)) + message.loss_weight = []; + if (reader.first()) + while (!reader.last()) { + message.loss_weight.push(reader.float()); + reader.next(); + } + else + message.loss_weight.push(reader.float()); + break; + case "accuracy_param": + message.accuracy_param = $root.caffe.AccuracyParameter.decodeText(reader, true); + break; + case "argmax_param": + message.argmax_param = $root.caffe.ArgMaxParameter.decodeText(reader, true); + break; + case "concat_param": + message.concat_param = $root.caffe.ConcatParameter.decodeText(reader, true); + break; + case "contrastive_loss_param": + message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decodeText(reader, true); + break; + case "convolution_param": + message.convolution_param = $root.caffe.ConvolutionParameter.decodeText(reader, true); + break; + case "data_param": + message.data_param = $root.caffe.DataParameter.decodeText(reader, true); + break; + case "dropout_param": + message.dropout_param = $root.caffe.DropoutParameter.decodeText(reader, true); + break; + case "dummy_data_param": + message.dummy_data_param = $root.caffe.DummyDataParameter.decodeText(reader, true); + break; + case "eltwise_param": + message.eltwise_param = $root.caffe.EltwiseParameter.decodeText(reader, true); + break; + case "exp_param": + message.exp_param = $root.caffe.ExpParameter.decodeText(reader, true); + break; + case "hdf5_data_param": + message.hdf5_data_param = 
$root.caffe.HDF5DataParameter.decodeText(reader, true); + break; + case "hdf5_output_param": + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader, true); + break; + case "hinge_loss_param": + message.hinge_loss_param = $root.caffe.HingeLossParameter.decodeText(reader, true); + break; + case "image_data_param": + message.image_data_param = $root.caffe.ImageDataParameter.decodeText(reader, true); + break; + case "infogain_loss_param": + message.infogain_loss_param = $root.caffe.InfogainLossParameter.decodeText(reader, true); + break; + case "inner_product_param": + message.inner_product_param = $root.caffe.InnerProductParameter.decodeText(reader, true); + break; + case "lrn_param": + message.lrn_param = $root.caffe.LRNParameter.decodeText(reader, true); + break; + case "memory_data_param": + message.memory_data_param = $root.caffe.MemoryDataParameter.decodeText(reader, true); + break; + case "mvn_param": + message.mvn_param = $root.caffe.MVNParameter.decodeText(reader, true); + break; + case "pooling_param": + message.pooling_param = $root.caffe.PoolingParameter.decodeText(reader, true); + break; + case "power_param": + message.power_param = $root.caffe.PowerParameter.decodeText(reader, true); + break; + case "relu_param": + message.relu_param = $root.caffe.ReLUParameter.decodeText(reader, true); + break; + case "sigmoid_param": + message.sigmoid_param = $root.caffe.SigmoidParameter.decodeText(reader, true); + break; + case "softmax_param": + message.softmax_param = $root.caffe.SoftmaxParameter.decodeText(reader, true); + break; + case "slice_param": + message.slice_param = $root.caffe.SliceParameter.decodeText(reader, true); + break; + case "tanh_param": + message.tanh_param = $root.caffe.TanHParameter.decodeText(reader, true); + break; + case "threshold_param": + message.threshold_param = $root.caffe.ThresholdParameter.decodeText(reader, true); + break; + case "window_data_param": + message.window_data_param = 
$root.caffe.WindowDataParameter.decodeText(reader, true); + break; + case "transform_param": + message.transform_param = $root.caffe.TransformationParameter.decodeText(reader, true); + break; + case "loss_param": + message.loss_param = $root.caffe.LossParameter.decodeText(reader, true); + break; + case "layer": + message.layer = $root.caffe.V0LayerParameter.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + V1LayerParameter.LayerType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NONE"] = 0; + values[valuesById[35] = "ABSVAL"] = 35; + values[valuesById[1] = "ACCURACY"] = 1; + values[valuesById[30] = "ARGMAX"] = 30; + values[valuesById[2] = "BNLL"] = 2; + values[valuesById[3] = "CONCAT"] = 3; + values[valuesById[37] = "CONTRASTIVE_LOSS"] = 37; + values[valuesById[4] = "CONVOLUTION"] = 4; + values[valuesById[5] = "DATA"] = 5; + values[valuesById[39] = "DECONVOLUTION"] = 39; + values[valuesById[6] = "DROPOUT"] = 6; + values[valuesById[32] = "DUMMY_DATA"] = 32; + values[valuesById[7] = "EUCLIDEAN_LOSS"] = 7; + values[valuesById[25] = "ELTWISE"] = 25; + values[valuesById[38] = "EXP"] = 38; + values[valuesById[8] = "FLATTEN"] = 8; + values[valuesById[9] = "HDF5_DATA"] = 9; + values[valuesById[10] = "HDF5_OUTPUT"] = 10; + values[valuesById[28] = "HINGE_LOSS"] = 28; + values[valuesById[11] = "IM2COL"] = 11; + values[valuesById[12] = "IMAGE_DATA"] = 12; + values[valuesById[13] = "INFOGAIN_LOSS"] = 13; + values[valuesById[14] = "INNER_PRODUCT"] = 14; + values[valuesById[15] = "LRN"] = 15; + values[valuesById[29] = "MEMORY_DATA"] = 29; + values[valuesById[16] = "MULTINOMIAL_LOGISTIC_LOSS"] = 16; + values[valuesById[34] = "MVN"] = 34; + values[valuesById[17] = "POOLING"] = 17; + values[valuesById[26] = "POWER"] = 26; + values[valuesById[18] = "RELU"] = 18; + values[valuesById[19] = "SIGMOID"] = 19; + values[valuesById[27] = "SIGMOID_CROSS_ENTROPY_LOSS"] = 
27; + values[valuesById[36] = "SILENCE"] = 36; + values[valuesById[20] = "SOFTMAX"] = 20; + values[valuesById[21] = "SOFTMAX_LOSS"] = 21; + values[valuesById[22] = "SPLIT"] = 22; + values[valuesById[33] = "SLICE"] = 33; + values[valuesById[23] = "TANH"] = 23; + values[valuesById[24] = "WINDOW_DATA"] = 24; + values[valuesById[31] = "THRESHOLD"] = 31; + return values; + })(); + + V1LayerParameter.DimCheckMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "STRICT"] = 0; + values[valuesById[1] = "PERMISSIVE"] = 1; + return values; + })(); + + return V1LayerParameter; + })(); + + caffe.V0LayerParameter = (function() { + + function V0LayerParameter(properties) { + this.blobs = []; + this.blobs_lr = []; + this.weight_decay = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + V0LayerParameter.prototype.name = ""; + V0LayerParameter.prototype.type = ""; + V0LayerParameter.prototype.num_output = 0; + V0LayerParameter.prototype.biasterm = true; + V0LayerParameter.prototype.weight_filler = null; + V0LayerParameter.prototype.bias_filler = null; + V0LayerParameter.prototype.pad = 0; + V0LayerParameter.prototype.kernelsize = 0; + V0LayerParameter.prototype.group = 1; + V0LayerParameter.prototype.stride = 1; + V0LayerParameter.prototype.pool = 0; + V0LayerParameter.prototype.dropout_ratio = 0.5; + V0LayerParameter.prototype.local_size = 5; + V0LayerParameter.prototype.alpha = 1; + V0LayerParameter.prototype.beta = 0.75; + V0LayerParameter.prototype.k = 1; + V0LayerParameter.prototype.source = ""; + V0LayerParameter.prototype.scale = 1; + V0LayerParameter.prototype.meanfile = ""; + V0LayerParameter.prototype.batchsize = 0; + V0LayerParameter.prototype.cropsize = 0; + V0LayerParameter.prototype.mirror = false; + V0LayerParameter.prototype.blobs = $util.emptyArray; + V0LayerParameter.prototype.blobs_lr = 
$util.emptyArray; + V0LayerParameter.prototype.weight_decay = $util.emptyArray; + V0LayerParameter.prototype.rand_skip = 0; + V0LayerParameter.prototype.det_fg_threshold = 0.5; + V0LayerParameter.prototype.det_bg_threshold = 0.5; + V0LayerParameter.prototype.det_fg_fraction = 0.25; + V0LayerParameter.prototype.det_context_pad = 0; + V0LayerParameter.prototype.det_crop_mode = "warp"; + V0LayerParameter.prototype.new_num = 0; + V0LayerParameter.prototype.new_channels = 0; + V0LayerParameter.prototype.new_height = 0; + V0LayerParameter.prototype.new_width = 0; + V0LayerParameter.prototype.shuffle_images = false; + V0LayerParameter.prototype.concat_dim = 1; + V0LayerParameter.prototype.hdf5_output_param = null; + + V0LayerParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.V0LayerParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.num_output = reader.uint32(); + break; + case 4: + message.biasterm = reader.bool(); + break; + case 5: + message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 6: + message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 7: + message.pad = reader.uint32(); + break; + case 8: + message.kernelsize = reader.uint32(); + break; + case 9: + message.group = reader.uint32(); + break; + case 10: + message.stride = reader.uint32(); + break; + case 11: + message.pool = reader.int32(); + break; + case 12: + message.dropout_ratio = reader.float(); + break; + case 13: + message.local_size = reader.uint32(); + break; + case 14: + message.alpha = reader.float(); + break; + case 15: + message.beta = reader.float(); + break; + case 22: + 
message.k = reader.float(); + break; + case 16: + message.source = reader.string(); + break; + case 17: + message.scale = reader.float(); + break; + case 18: + message.meanfile = reader.string(); + break; + case 19: + message.batchsize = reader.uint32(); + break; + case 20: + message.cropsize = reader.uint32(); + break; + case 21: + message.mirror = reader.bool(); + break; + case 50: + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32())); + break; + case 51: + if (!(message.blobs_lr && message.blobs_lr.length)) + message.blobs_lr = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.blobs_lr.push(reader.float()); + } else + message.blobs_lr.push(reader.float()); + break; + case 52: + if (!(message.weight_decay && message.weight_decay.length)) + message.weight_decay = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.weight_decay.push(reader.float()); + } else + message.weight_decay.push(reader.float()); + break; + case 53: + message.rand_skip = reader.uint32(); + break; + case 54: + message.det_fg_threshold = reader.float(); + break; + case 55: + message.det_bg_threshold = reader.float(); + break; + case 56: + message.det_fg_fraction = reader.float(); + break; + case 58: + message.det_context_pad = reader.uint32(); + break; + case 59: + message.det_crop_mode = reader.string(); + break; + case 60: + message.new_num = reader.int32(); + break; + case 61: + message.new_channels = reader.int32(); + break; + case 62: + message.new_height = reader.int32(); + break; + case 63: + message.new_width = reader.int32(); + break; + case 64: + message.shuffle_images = reader.bool(); + break; + case 65: + message.concat_dim = reader.uint32(); + break; + case 1001: + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32()); + break; + default: 
+ reader.skipType(tag & 7); + break; + } + } + return message; + }; + + V0LayerParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.V0LayerParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "num_output": + message.num_output = reader.uint32(); + break; + case "biasterm": + message.biasterm = reader.bool(); + break; + case "weight_filler": + message.weight_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "bias_filler": + message.bias_filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "pad": + message.pad = reader.uint32(); + break; + case "kernelsize": + message.kernelsize = reader.uint32(); + break; + case "group": + message.group = reader.uint32(); + break; + case "stride": + message.stride = reader.uint32(); + break; + case "pool": + message.pool = reader.enum($root.caffe.V0LayerParameter.PoolMethod); + break; + case "dropout_ratio": + message.dropout_ratio = reader.float(); + break; + case "local_size": + message.local_size = reader.uint32(); + break; + case "alpha": + message.alpha = reader.float(); + break; + case "beta": + message.beta = reader.float(); + break; + case "k": + message.k = reader.float(); + break; + case "source": + message.source = reader.string(); + break; + case "scale": + message.scale = reader.float(); + break; + case "meanfile": + message.meanfile = reader.string(); + break; + case "batchsize": + message.batchsize = reader.uint32(); + break; + case "cropsize": + message.cropsize = reader.uint32(); + break; + case "mirror": + message.mirror = reader.bool(); + break; + case "blobs": + if (!(message.blobs && message.blobs.length)) + message.blobs = []; + message.blobs.push($root.caffe.BlobProto.decodeText(reader, true)); + break; + case "blobs_lr": + if (!(message.blobs_lr && 
message.blobs_lr.length)) + message.blobs_lr = []; + if (reader.first()) + while (!reader.last()) { + message.blobs_lr.push(reader.float()); + reader.next(); + } + else + message.blobs_lr.push(reader.float()); + break; + case "weight_decay": + if (!(message.weight_decay && message.weight_decay.length)) + message.weight_decay = []; + if (reader.first()) + while (!reader.last()) { + message.weight_decay.push(reader.float()); + reader.next(); + } + else + message.weight_decay.push(reader.float()); + break; + case "rand_skip": + message.rand_skip = reader.uint32(); + break; + case "det_fg_threshold": + message.det_fg_threshold = reader.float(); + break; + case "det_bg_threshold": + message.det_bg_threshold = reader.float(); + break; + case "det_fg_fraction": + message.det_fg_fraction = reader.float(); + break; + case "det_context_pad": + message.det_context_pad = reader.uint32(); + break; + case "det_crop_mode": + message.det_crop_mode = reader.string(); + break; + case "new_num": + message.new_num = reader.int32(); + break; + case "new_channels": + message.new_channels = reader.int32(); + break; + case "new_height": + message.new_height = reader.int32(); + break; + case "new_width": + message.new_width = reader.int32(); + break; + case "shuffle_images": + message.shuffle_images = reader.bool(); + break; + case "concat_dim": + message.concat_dim = reader.uint32(); + break; + case "hdf5_output_param": + message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + V0LayerParameter.PoolMethod = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "MAX"] = 0; + values[valuesById[1] = "AVE"] = 1; + values[valuesById[2] = "STOCHASTIC"] = 2; + return values; + })(); + + return V0LayerParameter; + })(); + + caffe.PReLUParameter = (function() { + + function PReLUParameter(properties) { + if (properties) + for (var 
keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PReLUParameter.prototype.filler = null; + PReLUParameter.prototype.channel_shared = false; + + PReLUParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe.PReLUParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32()); + break; + case 2: + message.channel_shared = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PReLUParameter.decodeText = function decodeText(reader) { + var message = new $root.caffe.PReLUParameter(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "filler": + message.filler = $root.caffe.FillerParameter.decodeText(reader, true); + break; + case "channel_shared": + message.channel_shared = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return PReLUParameter; + })(); + + return caffe; + })(); + + return $root; +})(protobuf); diff --git a/frontend/packages/core/public/netron/caffe.js b/frontend/packages/core/public/netron/caffe.js new file mode 100644 index 00000000..73ac3698 --- /dev/null +++ b/frontend/packages/core/public/netron/caffe.js @@ -0,0 +1,820 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var caffe = caffe || {}; +var long = long || { Long: require('long') }; +var protobuf = protobuf || require('protobufjs'); +var prototxt = prototxt || require('protobufjs/ext/prototxt'); + +caffe.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = 
identifier.split('.').pop().toLowerCase(); + if (extension == 'caffemodel') { + return true; + } + if (extension == 'pbtxt' || extension == 'prototxt') { + if (identifier == 'saved_model.pbtxt' || identifier == 'saved_model.prototxt' || + identifier.endsWith('predict_net.pbtxt') || identifier.endsWith('predict_net.prototxt') || + identifier.endsWith('init_net.pbtxt') || identifier.endsWith('init_net.prototxt')) { + return false; + } + const tags = context.tags('pbtxt'); + if (tags.has('layer') || tags.has('layers') || tags.has('net') || tags.has('train_net') || tags.has('net_param')) { + return true; + } + } + if (extension == 'pt') { + // Reject PyTorch models + const buffer = context.buffer; + const torch = [ 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ]; + if (buffer && buffer.length > 14 && buffer[0] == 0x80 && torch.every((v, i) => v == buffer[i + 2])) { + return false; + } + // Reject TorchScript models + if (buffer && buffer.length > 2 && buffer[0] == 0x50 && buffer[1] == 0x4B) { + return false; + } + const tags = context.tags('pbtxt'); + if (tags.has('layer') || tags.has('layers') || tags.has('net') || tags.has('train_net') || tags.has('net_param')) { + return true; + } + } + return false; + } + + open(context, host) { + return host.require('./caffe-proto').then(() => { + caffe.proto = protobuf.roots.caffe.caffe; + return caffe.Metadata.open(host).then((metadata) => { + const extension = context.identifier.split('.').pop(); + if (extension == 'pbtxt' || extension == 'prototxt' || extension == 'pt') { + const tags = context.tags('pbtxt'); + if (tags.has('net') || tags.has('train_net') || tags.has('net_param')) { + try { + const reader = prototxt.TextReader.create(context.text); + reader.field = function(tag, message) { + if (message instanceof caffe.proto.SolverParameter) { + message[tag] = this.skip(); + return; + } + throw new Error("Unknown field '" + tag + "'" + this.location()); + }; + const solver = 
caffe.proto.SolverParameter.decodeText(reader); + if (solver.net_param) { + return this._openNetParameter(metadata, solver.net_param, host); + } + else if (solver.net || solver.train_net) { + let file = solver.net || solver.train_net; + file = file.split('/').pop(); + return context.request(file, 'utf-8').then((text) => { + return this._openNetParameterText(metadata, context.identifier, text, host); + }).catch((error) => { + if (error) { + const message = error && error.message ? error.message : error.toString(); + throw new caffe.Error("Failed to load '" + file + "' (" + message.replace(/\.$/, '') + ")."); + } + }); + } + } + catch (error) { + // continue regardless of error + } + } + return this._openNetParameterText(metadata, context.identifier, context.text, host); + } + else { + return this._openNetParameterBuffer(metadata, context.identifier, context.buffer, host); + } + }); + }); + } + + _openNetParameterBuffer(metadata, identifier, buffer, host, resolve, reject) { + try { + const netParameter = caffe.proto.NetParameter.decode(buffer); + return this._openNetParameter(metadata, netParameter, host, resolve, reject); + } + catch (error) { + throw new caffe.Error("File format is not caffe.NetParameter (" + error.message + ") in '" + identifier + "'."); + } + } + + _openNetParameterText(metadata, identifier, text, host) { + try { + const reader = prototxt.TextReader.create(text); + reader.field = function(tag, message) { + const type = message.constructor.name; + if (tag.endsWith('_param') && (type == 'LayerParameter' || type == 'V1LayerParameter' || type == 'V0LayerParameter')) { + message[tag] = caffe.ModelFactory._decodeText(reader); + return; + } + else if (message.constructor.name.endsWith('Parameter') || message.constructor.name === 'ParamSpec') { + if (message[tag]) { + if (!Array.isArray(message[tag])) { + message[tag] = [ message[tag] ]; + } + message[tag].push(this.skip()); + } + else { + message[tag] = this.skip(); + } + return; + } + throw new 
Error("Unknown field '" + tag + "'" + this.location()); + }; + reader.enum = function(type) { + const token = this.read(); + if (!Object.prototype.hasOwnProperty.call(type, token)) { + const value = Number.parseInt(token, 10); + if (!Number.isNaN(token - value)) { + return value; + } + return token; + } + return type[token]; + }; + const netParameter = caffe.proto.NetParameter.decodeText(reader); + return this._openNetParameter(metadata, netParameter, host); + } + catch (error) { + throw new caffe.Error("File text format is not caffe.NetParameter (" + error.message + ") in '" + identifier + "'."); + } + } + + _openNetParameter(metadata, netParameter, host) { + try { + return new caffe.Model(metadata, netParameter); + } + catch (error) { + host.exception(error, false); + throw new caffe.Error(error.message); + } + } + + static _decodeText(reader) { + const message = {}; + reader.start(); + while (!reader.end()) { + const tag = reader.tag(); + const value = reader.skip(); + if (!message[tag]) { + message[tag] = value; + } + else { + if (!Array.isArray(message[tag])) { + message[tag] = [ message[tag] ]; + } + message[tag].push(value); + } + } + return message; + } +}; + +caffe.Model = class { + + constructor(metadata, net) { + + this._name = net.name; + + if (net.layers && net.layers.length > 0) { + if (net.layers.every((layer) => Object.prototype.hasOwnProperty.call(layer, 'layer'))) { + this._version = 0; + net.layer = net.layers; + } + else { + this._version = 1; + net.layer = net.layers; + } + } + else if (net.layer && net.layer.length > 0) { + this._version = 2; + } + + this._graphs = []; + + const phases = new Set(); + for (const layer of net.layer) { + for (const include of layer.include) { + if (include.phase !== undefined) { + phases.add(include.phase); + } + } + } + if (phases.size === 0) { + phases.add(-1); + } + + for (const phase of phases) { + this._graphs.push(new caffe.Graph(metadata, phase, net, this._version)); + } + } + + get format() { + return 
'Caffe' + (this._version ? ' v' + this._version.toString() : ''); + } + + get graphs() { + return this._graphs; + } +}; + +caffe.Graph = class { + + constructor(metadata, phase, net, version) { + + switch (phase) { + case 0: this._phase = 'TRAIN'; break; + case 1: this._phase = 'TEST'; break; + case -1: this._phase = ''; break; + default: this._phase = phase.toString(); break; + } + + this._nodes = []; + this._inputs = []; + this._outputs = []; + + for (const layer of net.layer) { + layer.input = layer.bottom.slice(0); + layer.output = layer.top.slice(0); + layer.chain = []; + } + + const layers = []; + for (const layer of net.layer) { + if (phase === -1 || layer.include.every((include) => include.phase === phase)) { + layers.push(layer); + } + } + + const scope = {}; + let index = 0; + for (const layer of layers) { + layer.input = layer.input.map((input) => scope[input] ? scope[input] : input); + layer.output = layer.output.map((output) => { + scope[output] = scope[output] ? output + '\n' + index.toString() : output; // custom argument id + return scope[output]; + }); + index++; + } + + // Graph Inputs + const usedOutputs = new Set(); + for (const layer of layers) { + for (const output of layer.output) { + usedOutputs.add(output); + } + } + const unusedInputs = []; + for (const layer of layers) { + for (const input of layer.input) { + if (!usedOutputs.has(input)) { + unusedInputs.push(input); + } + } + } + + const nodes = []; + let lastLayer = null; + let lastTop = null; + while (layers.length > 0) { + let layer = layers.shift(); + if (layer.output.length == 1 && layer.input.length == 1 && + layer.output[0].split('\n').shift() == layer.input[0].split('\n').shift() && + lastLayer && + lastTop == layer.output[0].split('\n').shift()) { + lastLayer.chain = lastLayer.chain || []; + lastLayer.chain.push(layer); + } + else { + if (layer.type == 'Input' || layer.type == 'Data') { + if (layer.input.length == 0 && layer.output.length == 1 && + layer.input_param && 
layer.input_param.shape && + layer.input_param.shape.length == 1 && layer.input_param.shape[0].dim) { + const type = new caffe.TensorType(null, new caffe.TensorShape(layer.input_param.shape[0].dim)); + this._inputs.push(new caffe.Parameter(layer.output[0], [ new caffe.Argument(layer.output[0], type) ])); + layer = null; + } + } + if (layer) { + nodes.push(layer); + lastLayer = null; + lastTop = null; + if (layer.output.length == 1) { + lastLayer = layer; + lastTop = layer.output[0].split('\n').shift(); + } + } + } + } + + if (net.input && net.input.length > 0) { + index = 0; + for (const input of net.input) { + let inputType = null; + if (net.input_shape && index < net.input_shape.length) { + const blobShape = net.input_shape[index]; + if (blobShape && blobShape.dim) { + inputType = new caffe.TensorType(null, new caffe.TensorShape(blobShape.dim)); + } + } + if (inputType == null && net.input.length == 1 && net.input_dim && net.input_dim.length > 0) { + inputType = new caffe.TensorType(null, new caffe.TensorShape(net.input_dim)); + } + this._inputs.push(new caffe.Parameter(input, [ new caffe.Argument(input, inputType, null) ])); + index++; + } + } + + for (const layer of nodes) { + const node = new caffe.Node(metadata, layer, version); + if (layer.chain && layer.chain.length > 0) { + for (const chain of layer.chain) { + node.chain.push(new caffe.Node(metadata, chain, version)); + } + } + this._nodes.push(node); + } + + if (this._inputs.length === 0 && unusedInputs.length === 1) { + this._inputs.push(new caffe.Parameter(unusedInputs[0], [ + new caffe.Argument(unusedInputs[0], null) + ])); + } + } + + get name() { + return this._phase; + } + + get type() { + return ''; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +caffe.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + 
get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +caffe.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new caffe.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +caffe.Node = class { + + constructor(metadata, layer, version) { + this._metadata = metadata; + this._chain = []; + this._attributes = []; + + switch (version) { + case 0: { + this._name = layer.layer.name; + this._type = layer.layer.type; + break; + } + case 1: { + this._name = layer.name; + const typeIndex = layer.type; + if (typeIndex === undefined) { + this._type = '?'; + } + else { + if (!caffe.Node._typeMap) { + caffe.Node._typeMap = {}; + const known = { 'BNLL': 'BNLL', 'HDF5': 'HDF5', 'LRN': 'LRN', 'RELU': 'ReLU', 'TANH': 'TanH', 'ARGMAX': 'ArgMax', 'MVN': 'MVN', 'ABSVAL': 'AbsVal' }; + for (const key of Object.keys(caffe.proto.V1LayerParameter.LayerType)) { + const index = caffe.proto.V1LayerParameter.LayerType[key]; + caffe.Node._typeMap[index] = key.split('_').map((item) => { + return known[item] || item.substring(0, 1) + item.substring(1).toLowerCase(); + }).join(''); + } + } + this._type = caffe.Node._typeMap[typeIndex] || typeIndex.toString(); + } + break; + } + case 2: + this._name = layer.name; + this._type = layer.type; + break; + } + + let initializers = []; + + switch (version) { + case 0: + for (const attributeName of Object.keys(layer.layer)) { + if (attributeName != 'type' && attributeName != 'name' && attributeName != 'blobs' && attributeName != 'blobs_lr') { + this._attributes.push(new caffe.Attribute(metadata.attribute(this.type, attributeName), attributeName, layer.layer[attributeName])); + } + } + initializers = 
layer.layer.blobs.map((blob) => new caffe.Tensor(blob)); + break; + case 1: + case 2: + for (const layer_kind of Object.keys(layer)) { + if (layer_kind.endsWith('_param') || layer_kind == 'transform_param') { + const param = layer[layer_kind]; + let type = this._type; + if (type == 'Deconvolution') { + type = 'Convolution'; + } + const prototype = Object.getPrototypeOf(param); + for (const name of Object.keys(param)) { + const defaultValue = prototype[name]; + const value = param[name]; + if (value != defaultValue && (!Array.isArray(value) || !Array.isArray(defaultValue) || value.length != 0 || defaultValue.length != 0)) { + this._attributes.push(new caffe.Attribute(metadata.attribute(this.type, name), name, value)); + } + } + } + } + if (layer.include && layer.include.length > 0) { + this._attributes.push(new caffe.Attribute(this._metadata.attribute(this.type, 'include'), 'include', layer.include)); + } + if (layer.exclude && layer.exclude.length > 0) { + this._attributes.push(new caffe.Attribute(this._metadata.attribute(this.type, 'exclude'), 'exclude', layer.exclude)); + } + if (this._type == 'Data' && layer.input_param && layer.input_param.shape) { + this._attributes.push(new caffe.Attribute(this._metadata.attribute(this.type, 'shape'), 'shape', layer.input_param.shape)); + } + initializers = layer.blobs.map((blob) => new caffe.Tensor(blob)); + break; + } + + const schema = this._metadata.type(this.type); + + this._inputs = []; + const inputs = layer.input.concat(initializers); + let inputIndex = 0; + if (schema && schema.inputs) { + for (const inputDef of schema.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const inputCount = inputDef.option == 'variadic' ? inputs.length - inputIndex : 1; + this._inputs.push(new caffe.Parameter(inputDef.name, inputs.slice(inputIndex, inputIndex + inputCount).filter((input) => input !== '' || inputDef.option != 'optional').map((input) => { + return input instanceof caffe.Tensor ? 
new caffe.Argument('', input.type, input) : new caffe.Argument(input, null, null); + }))); + inputIndex += inputCount; + } + } + } + this._inputs = this._inputs.concat(inputs.slice(inputIndex).map((input) => { + return new caffe.Parameter(inputIndex.toString(), [ + input instanceof caffe.Tensor ? new caffe.Argument('', input.type, input) : new caffe.Argument(input, null, null) + ]); + })); + + this._outputs = []; + const outputs = layer.output; + let outputIndex = 0; + if (schema && schema.outputs) { + for (const outputDef of schema.outputs) { + if (outputIndex < outputs.length) { + const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + this._outputs.push(new caffe.Parameter(outputDef.name, outputs.slice(outputIndex, outputIndex + outputCount).map((output) => { + return new caffe.Argument(output, null, null); + }))); + outputIndex += outputCount; + } + } + } + this._outputs = this._outputs.concat(outputs.slice(outputIndex).map((output, index) => { + return new caffe.Parameter((outputIndex + index).toString(), [ + new caffe.Argument(output, null, null) + ]); + })); + } + + get type() { + return this._type; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get chain() { + return this._chain; + } +}; + +caffe.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + if (value instanceof caffe.proto.BlobShape) { + this._value = new caffe.TensorShape(value.dim); + } + if (schema) { + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + const defaultValue = schema.default; + if (this._value == defaultValue) { + this._visible = false; + } + else if 
(Array.isArray(this._value) && Array.isArray(defaultValue)) { + if (this._value.length == defaultValue.length && + this._value.every((item, index) => { return item == defaultValue[index]; })) { + this._visible = false; + } + } + } + } + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +caffe.Tensor = class { + + constructor(blob) { + this._blob = blob; + + let shape = []; + if (Object.prototype.hasOwnProperty.call(blob, 'num') && + Object.prototype.hasOwnProperty.call(blob, 'channels') && + Object.prototype.hasOwnProperty.call(blob, 'width') && + Object.prototype.hasOwnProperty.call(blob, 'height')) { + if (blob.num != 1) { + shape.push(blob.num); + } + if (blob.channels != 1) { + shape.push(blob.channels); + } + if (blob.width != 1) { + shape.push(blob.width); + } + if (blob.height != 1) { + shape.push(blob.height); + } + } + else if (Object.prototype.hasOwnProperty.call(blob, 'shape')) { + shape = blob.shape.dim; + } + + let dataType = '?'; + if (blob.data.length > 0) { + dataType = 'float32'; + this._data = blob.data; + } + else if (blob.double_data.length > 0) { + dataType = 'float64'; + this._data = blob.double_data; + } + + this._type = new caffe.TensorType(dataType, new caffe.TensorShape(shape)); + } + + get kind() { + return 'Blob'; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.state = null; + context.index = 0; + context.count = 0; + context.data = this._data; + 
context.dimensions = this.type.shape.dimensions; + if (!this._data) { + context.state = 'Tensor data is empty.'; + } + return context; + } + + _decode(context, dimension) { + const results = []; + const size = context.dimensions[dimension]; + if (dimension == context.dimensions.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(context.data[context.index]); + context.index++; + context.count++; + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + return results; + } +}; + +caffe.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return (this.dataType || '?') + this._shape.toString(); + } +}; + +caffe.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions.map((dimension) => { + if (dimension && long.Long.isLong(dimension)) { + return dimension.toNumber(); + } + return dimension; + }); + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? 
('[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']') : ''; + } +}; + +caffe.Metadata = class { + + static open(host) { + if (caffe.Metadata._metadata) { + return Promise.resolve(caffe.Metadata._metadata); + } + return host.request(null, 'caffe-metadata.json', 'utf-8').then((data) => { + caffe.Metadata._metadata = new caffe.Metadata(data); + return caffe.Metadata._metadata; + }).catch(() => { + caffe.Metadata._metadata = new caffe.Metadata(null); + return caffe.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +caffe.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Caffe model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = caffe.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/caffe2-metadata.json b/frontend/packages/core/public/netron/caffe2-metadata.json new file mode 100644 index 00000000..ed68ac62 --- /dev/null +++ b/frontend/packages/core/public/netron/caffe2-metadata.json @@ -0,0 +1,18518 @@ +[ + { + "name": "Conv", + "schema": { + "attributes": [ + { + "default": 0, + "name": "pad" + }, + { + "default": 1, + "name": "stride" + }, + { + "name": "exhaustive_search", + "type": "boolean", 
+ "visible": false + } + ], + "category": "Layer", + "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. 
The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n// Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n// Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n// Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n// Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n// Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ConvTranspose", + "schema": { + "attributes": [ + { + "description": "Should the legacy padding be VALID or SAME. When used, pads should not be used.", + "name": "legacy_pad", + "option": "optional", + "type": "int64" + }, + { + "description": "Desired kernel size. If left at default the kernel size will be inferred from the input $filter$ blob.", + "name": "kernels", + "option": "optional", + "type": "int64[]" + }, + { + "description": "Controls the stride of the kernel as it traverses the input blob.", + "name": "strides", + "option": "optional", + "type": "int64[]" + }, + { + "description": "Controls the amount of padding applied to the input feature map before computation.", + "name": "pads", + "option": "optional", + "type": "int64[]" + }, + { + "description": "", + "name": "adjs", + "option": "optional", + "type": "int64[]" + }, + { + "default": "NCHW", + "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. 
The only other valid option is \"NHWC\".", + "name": "order", + "option": "optional", + "type": "string" + }, + { + "default": 0, + "description": "", + "name": "shared_buffer", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "", + "name": "no_bias", + "option": "optional", + "type": "boolean" + } + ], + "category": "Layer", + "description": "\nThe ConvTranspose op takes an input data tensor $X$, an input weight tensor $filter$, and optionally an input bias tensor $bias$. It then computes the transposed convolution, sometimes referred to as deconvolution, and produces a single output tensor $Y$. The hyperparameters of the op such as kernel size, stride, and padding are specified as args. At each stride, the filter is deconvolved with a subset of $X$ and the $bias$ is added. This is done throughout the input data until the output computation is complete.\n\nThe output shapes are computed as follows. The number of channels in the output feature map is the number of kernels specified in the filter blob. The spatial height and width are computed as:\n\n$$H_{out} = (H_{in}-1)*strides[0] - 2*pads[0] + kernels[0]$$\n\n\n$$W_{out} = (W_{in}-1)*strides[1] - 2*pads[1] + kernels[1]$$\n\nNote on the implementation layout: conv_transpose_op_impl.h is the templated implementation of the conv_transpose_op.h file, which is why they are separate files. Also, in the implementation this operator inherits from the *ConvTransposeUnpoolOpBase* operator.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_op.h\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_op.cc\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_unpool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConvTranspose\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernels=[2,2],\n pads=[4,4,4,4],\n strides=[2,2]\n)\n\n// Create X: (N,C,H,W)\ndata = np.random.randn(2,3,5,5).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n// Create filter: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,2,2).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n// Create b: M\nbias = np.array([1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n// Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n// Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (2, 3, 5, 5)\nFilter shape: (3, 1, 2, 2)\nBias shape: (1,)\nY:\n [[[[0.53606427 0.5775447 ]\n [0.40148795 1.5188271 ]]]\n\n\n [[[1.9903406 3.2794335 ]\n [0.09960175 0.31917763]]]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be operated on.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{out}, K_H, K_W)$, containing the filters to be used in the transposed convolution.", + "name": "filter" + }, + { + "description": "The bias blob, of length $C_{out}$, containing the biases for the operation, one bias per output channel. If not passed, biases assumed to be zeros.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the operation.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "FC", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the input data $X$. Defaults to one because in the common case when the input $X$ has shape $(M,K)$, the first axis encodes the batch size.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": 1, + "description": "Describes the axis of the input weight matrix $W$. Defaults to one because the first axis most likely describes the batch_size.", + "name": "axis_w", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "Whether to use float-16 compute kernel.", + "name": "float16_compute", + "option": "optional", + "type": "boolean" + } + ], + "category": "Layer", + "description": "\nThe FC operator computes an output $(Y)$ as a linear combination of the input data blob $(X)$ with a weight blob $(W)$ and bias blob $(b)$. More formally,\n\n$$Y = XW^T+b$$\n\nHere, $X$ is a matrix of shape $(M,K)$, $W$ is a matrix of shape $(N,K)$, $b$ is a vector of length $N$, and $Y$ is a matrix of shape $(M,N)$. 
$N$ can be thought of as the number of nodes in the layer, $M$ is the batch size, and $K$ is the number of features in an input observation.\n\n*NOTE: $X$ does not need to explicitly be a 2-dimensional matrix, however, if it is not it will be coerced into one. For an arbitrary $n$-dimensional tensor $X$, e.g. $[a_0, a_1, \\ldots ,a_{k-1}, a_k, \\ldots , a_{n-1}]$, where $a_i$ in $N$, and $k$ is the $axis$ arg provided, then $X$ will be coerced into a 2-dimensional tensor with dimensions $[a_0 * \\ldots * a_{k-1}, a_k * \\ldots * a_{n-1}]$. For the default case where axis=1, this means the $X$ tensor will be coerced into a 2D tensor of dimensions $[a_0, a_1 * \\ldots * a_{n-1}]$, where $a_0$ is often the batch size. In this situation, we must have $a_0 = M$ and $a_1 * \\ldots * a_{n-1} = K$. Lastly, even though $b$ is a vector of length $N$, it is copied and resized to shape $(M x N)$ implicitly, then added to each vector in the batch.*\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/fully_connected_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/fully_connected_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\n// In this example, our batch size is 1 (M=1), the input observation will have\n// 6 features (K=6), and the layer will have one hidden node (N=1). The\n// expected output is Y=7.\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FC\",\n [\"X\", \"W\", \"b\"],\n [\"Y\"]\n)\n\n// Create X: MxK\ndata = np.array([1,2,3,4,5,6]).astype(np.float32)\ndata = data[np.newaxis,:]\n\n// Create W: NxK\nweights = np.array(np.array([1,1/2.,1/3.,1/4.,1/5.,1/6.])).astype(np.float32)\nweights = weights[np.newaxis,:]\n\n// Create b: N\nbias = np.array([1.]).astype(np.float32)\n\n// Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"W\", weights)\nworkspace.FeedBlob(\"b\", bias)\n\n// Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nY:\n [[7.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input blob to be coerced into a 2D matrix of shape $(M,K)$, where $M$ is the batch size and $K$ is the number of features in a single observation.", + "name": "X" + }, + { + "description": "Input blob to be coerced into a 2D matrix of shape $(N,K)$ describing a fully connected weight matrix. Here, $K$ is the number of features in a single observation and $N$ is the number of nodes in the FC layer.", + "name": "W" + }, + { + "description": "Input blob containing vector of length $N$ which describes one bias for each node in the layer.", + "name": "b" + } + ], + "outputs": [ + { + "description": "Output blob containing a 2D output matrix of shape $(M,N)$, where $M$ is the batch size and $N$ is the number of nodes in the layer. The output is calculated as $Y=XW^T+b$.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Add", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise binary addition (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. 
B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Add\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"B\", np.array([[5,6],[7,8]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[1 2]\n [3 4]]\nB:\n[[5 6]\n [7 8]]\nC:\n[[ 6 8]\n [10 12]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "Sum", + "schema": { + "description": "\nElement-wise sum of each of the input tensors. The first input tensor can be used\nin-place as the output tensor, in which case the sum will be done in place and\nresults will be accumulated the first input tensor. All inputs and outputs must\nhave the same shape and data type.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_sum_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sum\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2],[3,4]]).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.array([[5,6],[7,8]]).astype(np.float32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"A\"))\n\n```\n\n**Result**\n\n```\n\nA: [[1. 2.]\n [3. 4.]]\nB: [[5. 6.]\n [7. 8.]]\nC: [[1. 2.]\n [3. 4.]]\n\n```\n\n
\n\n
\n\n Example 2 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sum\",\n [\"A\", \"B\"],\n [\"A\"], // inplace\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2,5],[8,3,4]]).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.array([[9,5,6],[6,7,8]]).astype(np.float32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"A after Sum:\", workspace.FetchBlob(\"A\"))\n\n```\n\n**Result**\n\n```\n\nA: [[1. 2. 5.]\n [8. 3. 4.]]\nB: [[9. 5. 6.]\n [6. 7. 8.]]\nA after Sum: [[10. 7. 11.]\n [14. 10. 12.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First tensor to be added element-wise.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second tensor to be added element-wise.", + "name": "B" + }, + { + "description": "First of the input tensors. Can be inplace.", + "name": "data_0" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Sum of A and B.", + "name": "C" + }, + { + "description": "Output tensor. Same dimension as inputs.", + "name": "sum" + } + ], + "support_level": "default" + } + }, + { + "name": "Mul", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise binary multiplication (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Mul\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"B\", np.array([[5,6],[7,8]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[1 2]\n [3 4]]\nB:\n[[5 6]\n [7 8]]\nC:\n[[ 5 12]\n [21 32]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "MatMul", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Exclusive axis that divides the first and second dimension of matrix `A`.", + "name": "axis_a", + "option": "optional", + "type": "int64" + }, + { + "default": 1, + "description": "Exclusive axis that divides the first and second dimension of matrix `B`.", + "name": "axis_b", + "option": "optional", + "type": "int64" + }, + { + "default": 0, + "description": "Pass 1 to transpose `A` before multiplication and after the dimension adjustment using `axis_a`.", + "name": "trans_a", + "option": "optional", + "type": "int64" + }, + { + "default": 0, + "description": "Pass 1 to transpose `B` before multiplication and after the dimension adjustment using `axis_b`.", + "name": "trans_b", + "option": "optional", + "type": "int64" + } + ], + "description": "\nMatrix multiplication $Y = A * B$, where `A` has size (M x K), `B` has size\n(K x N), and `Y` will have a size (M x N). To transpose `A` or `B` before\nmultiplication, pass 1 to the `trans_a` and/or `trans_b` arguments, which\nseparate the first and second dimensions of the respective matrices using\n`axis_a` and `axis_b`.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/matmul_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MatMul\",\n [\"A\", \"B\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nA: [[1. 8. 3.]\n [6. 4. 4.]\n [5. 4. 7.]]\nB: [[4. 0. 3.]\n [3. 1. 1.]\n [8. 5. 8.]]\nY: [[52. 23. 35.]\n [68. 24. 54.]\n [88. 39. 75.]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* 2D matrix of size (M x K).", + "name": "A" + }, + { + "description": "*(type: Tensor``)* 2D matrix of size (K x N).", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* 2D matrix of size (M x N).", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Relu", + "schema": { + "attributes": [ + { + "name": "cudnn_exhaustive_search", + "type": "boolean", + "visible": false + } + ], + "category": "Activation", + "description": "\nApplies rectified linear unit operation to the input data element-wise. The Relu operation takes one input $X$, produces one output $Y$, and is defined as:\n\n$$Y = max(0,X)$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/relu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/relu_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Relu\",\n [\"X\"],\n [\"Y\"]\n )\n\nworkspace.FeedBlob(\"X\", np.random.randn(4, 4).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-1.4655551 0.64575136 0.7921748 0.4150579 ]\n [ 0.41085166 -0.2837964 0.9881425 -1.9300346 ]\n [ 0.39705405 0.44639114 0.9940703 0.2926532 ]\n [-0.6726489 0.01330667 1.101319 0.33858967]]\n\nY:\n [[0. 0.64575136 0.7921748 0.4150579 ]\n [0.41085166 0. 0.9881425 0. ]\n [0.39705405 0.44639114 0.9940703 0.2926532 ]\n [0. 0.01330667 1.101319 0.33858967]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor with same shape as input", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Sigmoid", + "schema": { + "category": "Activation", + "description": "\nApply the Sigmoid function element-wise to the input tensor. This is often used\nas a non-linear activation function in a neural network. The sigmoid function is\ndefined as:\n\n$$Sigmoid(x) = \\frac{1}{1+\\exp(-x)}$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sigmoid_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sigmoid\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"input:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"sigmoid:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\ninput: [ 1.5744036 0.31632107 1.7842269 1.4450722 -2.1726978 ]\nsigmoid: [0.8284105 0.57842743 0.85621804 0.80923885 0.10222916]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "PRelu", + "schema": { + "category": "Activation", + "description": "\n\nThe *PRelu* op takes input data tensor $X$, an input slope tensor $slope$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element wise *PRelu* operation, defined as\n\n$$y=prelu(x) =\\begin{cases}slope * x & x < 0\\\\x & otherwise\\end{cases}$$\n\nNote, is slope is size 1, the value is shared across the channels, otherwise $X$ and $slope$ must be the same shape. See [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852) for more information.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/prelu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/prelu_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"PRelu\",\n [\"X\",\"Slope\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.FeedBlob(\"Slope\", np.array([0.1]).astype(np.float32))\nprint(\"Slope:\\n\", workspace.FetchBlob(\"Slope\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 0.3957382 -0.19725518 -0.26991343]\n [ 1.5513182 -0.27427664 -0.14584002]\n [-0.4121164 0.9292345 0.96426094]]\n\nSlope:\n [0.1]\n\nY:\n [[ 0.3957382 -0.01972552 -0.02699134]\n [ 1.5513182 -0.02742766 -0.014584 ]\n [-0.04121164 0.9292345 0.96426094]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "X" + }, + { + "description": "1D input slope tensor. If `Slope` is of size 1, the value is shared across different channels", + "name": "Slope" + } + ], + "outputs": [ + { + "description": "Output tensor, with same shape as $X$.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Softmax", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Axis of the inputs when coerced to 2D matrix.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "category": "Activation", + "description": "\n\nApplies the Softmax function to an n-dimensional input Tensor rescaling them so\nthat the elements of the n-dimensional output Tensor lie in the range (0,1) and\nsum to 1. The softmax operator is typically the last layer in a classifier network,\nas its output can be interpreted as confidence probabilities of an input belonging\nto each class. The input is a 2-D tensor (Tensor) of size (batch_size x\ninput_feature_dimensions). The output tensor has the same shape and contains the\nsoftmax normalized values of the corresponding input. The softmax function is\ndefined as follows:\n\n$$softmax(x_i) = \\frac{\\exp(x_i)}{\\sum_{j} \\exp(x_j)}$$\n\nThe input does not need to explicitly be a 2D vector; rather, it will be coerced\ninto one. For an arbitrary n-dimensional tensor `X` in\n$[a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}]$, where k is the `axis` provided,\nthen `X` will be coerced into a 2-dimensional tensor with dimensions\n$[(a_0 * ... * a_{k-1}), (a_k * ... * a_{n-1})]$. For the default case where\n`axis`=1, the `X` tensor will be coerced into a 2D tensor of dimensions\n$[a_0, (a_1 * ... * a_{n-1})]$, where $a_0$ is often the batch size. In this\nsituation, we must have $a_0 = N$ and $a_1 * ... * a_{n-1} = D$. 
Each of these\ndimensions must be matched correctly, or else the operator will throw errors.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softmax_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softmax_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Softmax\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 5).astype(np.float32))\nprint(\"input:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"softmax:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\ninput: [[ 0.0417839 0.61960053 -0.23150268 -0.64389366 -3.0000346 ]]\nsoftmax: [[0.24422921 0.43525138 0.18582782 0.12303016 0.01166145]]\n\n```\n\n
\n\n\n\n", + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input" + }, + { + "description": "*(type: Tensor``)* Input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The softmax normalized output values with the same shape as input tensor.", + "name": "output" + }, + { + "description": "*(type: Tensor``)* The softmax normalized output tensor with the same shape as input tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "MaxPool", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "name": "pad" + }, + { + "name": "cudnn_exhaustive_search", + "type": "boolean", + "visible": false + } + ], + "category": "Pool", + "description": "MaxPool \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragePool", + "schema": { + "category": "Pool", + "description": "AveragePool \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SpatialBN", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If set to nonzero, run spatial batch normalization in test mode.", + "name": "is_test", + "type": "int64" + }, + { + "default": 1e-05, + "description": "The epsilon value to use to avoid division by zero.", + "name": "epsilon", + "option": "optional", + "type": "float32" + }, + { + "default": "NCHW", + "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. The only other valid option is \"NHWC\".", + "name": "order", + "option": "optional", + "type": "string" + }, + { + "default": 0.9, + "description": "Factor used in computing the running mean and variance. e.g., running_mean = running_mean x momentum + mean x (1 - momentum)", + "name": "momentum", + "option": "optional", + "type": "float32" + }, + { + "default": 1, + "description": "Specifies the number of batches to apply normalization on. Requires specifying the optional sums and sumsq inputs that provide statistics across multiple batches from which mean and variance can be determined.", + "name": "num_batches", + "option": "optional", + "type": "int64" + } + ], + "category": "Normalization", + "description": "\nApplies spatial batch normalization to the input tensor as described in the original paper, [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167). Be aware, this operator has two different output sets, depending on the value of *is_test*. 
According to the paper, the primary operation of spatial batch normalization is:\n\n$$Y = \\frac{X - \\mu_x}{\\sqrt{\\sigma^2_{x} + \\epsilon}}*\\gamma + b$$\n\nIn the equation, $\\mu_x$ is the *mean*, $X$ is the input data, $\\sigma^2_{x}$ is the *var*, $\\epsilon$ is *epsilon*, $\\gamma$ is the *scale*, $b$ is the *bias*, and $Y$ is the output data. The *momentum* arg also affects this calculation in the computation of the running mean and variance. The influence of *momentum* is as follows:\n\n$$running\\_mean = running\\_mean * momentum + mean * (1 - momentum)$$\n\n$$running\\_var = running\\_var * momentum + var * (1 - momentum)$$\n\nOutput when is_test = 0 (train mode): *Y, mean, var, saved_mean, saved_var*\n\nOutput when is_test = 1 (test mode): *Y*\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/spatial_batch_norm_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/spatial_batch_norm_op.h\n\n", + "inputs": [ + { + "name": "input" + }, + { + "description": "The scale as a 1-dimensional tensor of size $C$ to be applied to the output.", + "name": "scale" + }, + { + "description": "The bias as a 1-dimensional tensor of size $C$ to be applied to the output.", + "name": "bias" + }, + { + "description": "The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size $C$.", + "name": "mean" + }, + { + "description": "The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size $C$.", + "name": "var" + }, + { + "description": "The input 4-dimensional tensor of shape $NCHW$ or $NHWC$ depending on the order parameter.", + "name": "X" + }, + { + "description": "*(optional)* Per-channel sums of elements to be used to determine the mean and variance for this batch.", + "name": "sums" + }, + { + "description": "*(optional)* Per-channel sum of elements squared per channel to be used to determine the variance for this batch.", + "name": "sumsq" + 
} + ], + "outputs": [ + { + "description": "The output 4-dimensional tensor of the same shape as $X$.", + "name": "Y" + }, + { + "description": "The running mean after the spatial BN operator. Must be in-place with the input *mean*. Should not be used for testing.", + "name": "mean" + }, + { + "description": "The running variance after the spatial BN operator. Must be in-place with the input *var*. Should not be used for testing.", + "name": "var" + }, + { + "description": "Saved mean used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_mean" + }, + { + "description": "Saved variance used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_var" + } + ], + "support_level": "default" + } + }, + { + "name": "LRN", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Amount of neighboring channels to sum over for normalization", + "name": "size", + "option": "optional", + "type": "int64" + }, + { + "default": 0.0, + "description": "Multiplicative (scaling) factor.", + "name": "alpha", + "option": "optional", + "type": "float32" + }, + { + "default": 0.0, + "description": "Exponent.", + "name": "beta", + "option": "optional", + "type": "float32" + }, + { + "default": 1.0, + "description": "Additive factor.", + "name": "bias", + "option": "optional", + "type": "float32" + }, + { + "default": 0, + "description": "Order of blob dimensions.", + "name": "order", + "option": "optional", + "type": "float32" + } + ], + "category": "Normalization", + "description": "\n\n`LRN` applies Local Response Normalization to an input blob. This operation performs\na kind of \"lateral inhibition\" by normalizing over local input regions, where\nnormalization is applied across channels. This operator is typically used to\nnormalize an unbounded activation (such as ReLU). The output shape is the same as\nthe input shape. 
The `brew` module has a wrapper for this operator for use in a\n`ModelHelper` object.\n\nThe formula for LRN is as follows:\n\n$$b_{c} = a_{c}(bias + \\frac{\\alpha}{n}\\sum_{c'=max(0,c-n/2)}^{min(N-1,c+n/2)} a_{c'}^2 )^{-\\beta}$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/local_response_normalization_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/local_response_normalization_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\"LRN\",\n [\"X\"],\n [\"Y\", \"Y_scale\"],\n size=11,\n alpha=0.001,\n beta=0.5,\n bias=2.0,\n order=\"NHWC\"\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 6, 6, 1).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nprint(\"Y_scale:\\n\", workspace.FetchBlob(\"Y_scale\"))\n```\n\n**Result**\n\n```\nX:\n [[[[ 0.72985137]\n [-0.3753357 ]\n [ 2.7344604 ]\n [-0.5937792 ]\n [ 0.38440478]\n [-2.1659644 ]]\n\n [[-0.92846817]\n [-0.9996144 ]\n [ 0.212943 ]\n [-1.968045 ]\n [-0.77839696]\n [ 0.45492038]]\n\n [[-0.11263168]\n [ 1.9901097 ]\n [ 0.19275683]\n [ 0.15630436]\n [ 0.7536298 ]\n [-0.77339894]]\n\n [[ 0.8353551 ]\n [-0.7784452 ]\n [ 1.779317 ]\n [ 0.22421335]\n [ 1.3846219 ]\n [-3.0546608 ]]\n\n [[ 0.09977621]\n [ 2.2071757 ]\n [ 0.79971045]\n [ 3.563886 ]\n [-0.7169287 ]\n [ 0.77170426]]\n\n [[-1.4296649 ]\n [ 0.19181213]\n [ 0.45961624]\n [-1.0201577 ]\n [ 0.62854475]\n [-0.6395456 ]]]]\n\nY:\n [[[[ 0.5160766 ]\n [-0.26540157]\n [ 1.9332271 ]\n [-0.41986194]\n [ 0.27181432]\n [-1.5314047 ]]\n\n [[-0.6565133 ]\n [-0.7068181 ]\n [ 0.15057328]\n [-1.3914955 ]\n [-0.5504022 ]\n [ 0.32167578]]\n\n [[-0.0796426 ]\n [ 1.4070934 ]\n [ 0.13629955]\n [ 0.11052381]\n [ 0.53288984]\n [-0.5468682 ]]\n\n [[ 0.5906759 ]\n [-0.5504363 ]\n [ 1.2580767 ]\n [ 0.1585426 ]\n [ 0.9790328 ]\n [-2.1595135 ]]\n\n [[ 0.07055242]\n [ 1.5605361 ]\n [ 0.5654725 ]\n [ 2.5193207 ]\n [-0.50693923]\n [ 0.54567 ]]\n\n [[-1.0108787 ]\n [ 0.13563155]\n [ 0.3249962 ]\n [-0.72134334]\n [ 0.44444424]\n [-0.45222285]]]]\nY_scale:\n [[[[2.0000484]\n [2.0000129]\n [2.0006797]\n [2.000032 ]\n [2.0000134]\n [2.0004265]]\n\n [[2.0000784]\n [2.0000908]\n [2.000004 ]\n [2.0003521]\n [2.000055 ]\n [2.0000188]]\n\n [[2.0000012]\n [2.00036 ]\n [2.0000033]\n [2.0000021]\n [2.0000517]\n [2.0000544]]\n\n 
[[2.0000634]\n [2.000055 ]\n [2.0002878]\n [2.0000045]\n [2.0001743]\n [2.0008483]]\n\n [[2.000001 ]\n [2.000443 ]\n [2.0000582]\n [2.0011547]\n [2.0000467]\n [2.0000541]]\n\n [[2.0001857]\n [2.0000033]\n [2.0000193]\n [2.0000947]\n [2.000036 ]\n [2.0000372]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor (ReLU output).", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + }, + { + "description": "*(type: Tensor``)* Output scale.", + "name": "Y_scale" + } + ], + "support_level": "default" + } + }, + { + "name": "Dropout", + "schema": { + "attributes": [ + { + "default": 0.5, + "description": "Probability of an element to be zeroed.", + "name": "ratio", + "option": "optional", + "type": "float32" + }, + { + "default": 0, + "description": "If zero (train mode), perform dropout. If non-zero(test mode), Y = X.", + "name": "is_test", + "type": "int64" + } + ], + "category": "Dropout", + "description": "\n\n`Dropout` takes one input data tensor (`X`) and produces two tensor outputs, `Y` and\n`mask`. If the `is_test` argument is zero (default=0), the output `Y` will be the input\nwith random elements zeroed. The probability that a given element is zeroed is\ndetermined by the `ratio` argument.\n\nIf the `is_test` argument is set to non-zero, the output `Y` is exactly the same as the\ninput `X`. Note that outputs are scaled by a factor of $\\frac{1}{1-ratio}$ during\ntraining, so that during test time, we can simply compute an identity function. This\nscaling is important because we want the output at test time to equal the expected value\nat training time. Dropout has been proven to be an effective regularization technique to\nprevent overfitting during training.\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Dropout\",\n [\"X\"],\n [\"Y\"] + [\"mask\"],\n ratio=0.5,\n is_test=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(5, 5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\n```\n\n**Result**\n\n```\nX: [[5. 4. 3. 6. 9.]\n [2. 1. 8. 0. 9.]\n [7. 3. 0. 6. 3.]\n [1. 8. 2. 6. 4.]\n [6. 2. 6. 4. 0.]]\nY: [[ 0. 0. 0. 12. 18.]\n [ 0. 0. 16. 0. 0.]\n [ 0. 0. 0. 12. 6.]\n [ 0. 0. 4. 0. 0.]\n [12. 0. 0. 0. 0.]]\nmask: [[False False False True True]\n [False False True True False]\n [False False True True True]\n [False False True False False]\n [ True False False False False]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data" + }, + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output.", + "name": "output" + }, + { + "description": "*(type: Tensor``)* The output mask containing boolean values foreach element, signifying which elements are dropped out. If `is_test` isnonzero, this output is not filled.", + "name": "mask" + }, + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Concat", + "schema": { + "attributes": [ + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "description": "Order of blob dimensions. Concats on the C dimension.", + "name": "order", + "option": "optional", + "type": "string" + }, + { + "description": "Pass non-zero integer to add the axis specified in `axis` to all input tensors.", + "name": "add_axis", + "option": "optional", + "type": "int64" + } + ], + "category": "Tensor", + "description": "\nConcatenate a list of tensors into a single tensor. Similar functionality to\nNumpy's [concatenate](https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html)\nfunction. The `axis` argument specifies what axis along which the arrays will be concatenated.\nWhen set to non-zero (default=0), the `add_axis` argument adds the axis specified in `axis` to\nall input tensors.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Concat\",\n [\"X1\", \"X2\"],\n [\"Y\", \"split_info\"],\n axis=0\n)\n\nworkspace.FeedBlob(\"X1\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"X2\", np.array([[5,6]]))\nprint(\"X1:\", workspace.FetchBlob(\"X1\"))\nprint(\"X2:\", workspace.FetchBlob(\"X2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"split_info:\", workspace.FetchBlob(\"split_info\"))\n\n```\n\n**Result**\n\n```\n\nX1: [[1 2]\n [3 4]]\nX2: [[5 6]]\nY: [[1 2]\n [3 4]\n [5 6]]\nsplit_info: [2 1]\n\n```\n\n
\n\n
\n\n Example 2 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Concat\",\n [\"X1\", \"X2\"],\n [\"Y\", \"split_info\"],\n add_axis=1,\n axis=3\n)\n\nworkspace.FeedBlob(\"X1\", np.random.randint(10, size=(1, 1, 5, 5))) // NCHW\nworkspace.FeedBlob(\"X2\", np.random.randint(10, size=(1, 1, 5, 5))) // NCHW\nprint(\"X1:\", workspace.FetchBlob(\"X1\"))\nprint(\"X2:\", workspace.FetchBlob(\"X2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"split_info:\", workspace.FetchBlob(\"split_info\"))\n\n```\n\n**Result**\n\n```\n\nX1: [[[[1 8 3 9 0]\n [6 4 6 5 6]\n [3 9 1 9 9]\n [5 1 0 7 7]\n [9 4 0 0 9]]]]\nX2: [[[[7 0 2 6 1]\n [3 9 4 0 3]\n [5 3 8 9 4]\n [3 4 2 1 0]\n [0 8 8 8 1]]]]\nY: [[[[[1 8 3 9 0]\n [7 0 2 6 1]]\n\n [[6 4 6 5 6]\n [3 9 4 0 3]]\n\n [[3 9 1 9 9]\n [5 3 8 9 4]]\n\n [[5 1 0 7 7]\n [3 4 2 1 0]]\n\n [[9 4 0 0 9]\n [0 8 8 8 1]]]]]\nsplit_info: [1 1]\n\n```\n\n
\n\n ", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + }, + { + "description": "*(type: Tensor``)* List of input tensors.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Concatenated tensor.", + "name": "concat_result" + }, + { + "description": "*(type: Tensor``)* The dimensions of the inputs.", + "name": "split_info" + } + ], + "support_level": "default" + } + }, + { + "name": "GenerateProposals", + "schema": { + "attributes": [ + { + "description": "(float) spatial scale", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) RPN_PRE_NMS_TOP_N", + "name": "pre_nms_topN", + "option": "optional" + }, + { + "description": "(int) RPN_POST_NMS_TOP_N", + "name": "post_nms_topN", + "option": "optional" + }, + { + "description": "(float) RPN_NMS_THRESH", + "name": "nms_thresh", + "option": "optional" + }, + { + "description": "(float) RPN_MIN_SIZE", + "name": "min_size", + "option": "optional" + }, + { + "description": "bool (default false), Correct bounding box transform coordates, see bbox_transform() in boxes.py Set to true to match the detectron code, set to false for backward compatibility", + "name": "correct_transform_coords", + "option": "optional" + }, + { + "description": "bool (default true). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_on", + "option": "optional" + }, + { + "description": "int (default -90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_lo", + "option": "optional" + }, + { + "description": "int (default 90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_hi", + "option": "optional" + }, + { + "description": "float (default 1.0 degrees). 
For RRPN, clip almost horizontal boxes within this threshold of tolerance for backward compatibility. Set to negative value for no clipping.", + "name": "clip_angle_thresh", + "option": "optional" + } + ], + "description": "\nGenerate bounding box proposals for Faster RCNN. The propoasls are generated for\na list of images based on image score 'score', bounding box regression result\n'deltas' as well as predefined bounding box shapes 'anchors'. Greedy\nnon-maximum suppression is applied to generate the final bounding boxes.\n", + "inputs": [ + { + "description": "Scores from conv layer, size (img_count, A, H, W)", + "name": "scores" + }, + { + "description": "Bounding box deltas from conv layer, size (img_count, 4 * A, H, W)", + "name": "bbox_deltas" + }, + { + "description": "Image info, size (img_count, 3), format (height, width, scale)", + "name": "im_info" + }, + { + "description": "Bounding box anchors, size (A, 4)", + "name": "anchors" + } + ], + "outputs": [ + { + "description": "Proposals, size (n x 5), format (image_index, x1, y1, x2, y2)", + "name": "rois" + }, + { + "description": "scores of proposals, size (n)", + "name": "rois_probs" + } + ], + "support_level": "default" + } + }, + { + "name": "RoIAlign", + "schema": { + "attributes": [ + { + "description": "(float) default 1.0; Spatial scale of the input feature map X relative to the input image. E.g., 0.0625 if X has a stride of 16 w.r.t. the input image.", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's height.", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's width.", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "(int) default -1; number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. 
If <= 0, then an adaptive number of grid points are used (computed as ceil(roi_width / pooled_w), and likewise for height).", + "name": "sampling_ratio", + "option": "optional" + } + ], + "description": "\nRegion of Interest (RoI) align operation as used in Mask R-CNN.\n", + "inputs": [ + { + "description": "4D feature map input of shape (N, C, H, W).", + "name": "X" + }, + { + "description": "2D input of shape (R, 4 or 5) specifying R RoIs representing: batch index in [0, N - 1], x1, y1, x2, y2. The RoI coordinates are in the coordinate system of the input image. For inputs corresponding to a single image, batch index can be excluded to have just 4 columns.", + "name": "RoIs" + } + ], + "outputs": [ + { + "description": "4D output of shape (R, C, pooled_h, pooled_w). The r-th batch element is a pooled feature map cooresponding to the r-th RoI.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BBoxTransform", + "schema": { + "attributes": [ + { + "description": "vector weights [wx, wy, ww, wh] for the deltas", + "name": "weights", + "option": "optional" + }, + { + "description": "bool (default true), transform the boxes to the scaled image space after applying the bbox deltas.Set to false to match the detectron code, set to true for keypoint models and for backward compatibility", + "name": "apply_scale", + "option": "optional" + }, + { + "description": "bool (default false), Correct bounding box transform coordates, see bbox_transform() in boxes.py Set to true to match the detectron code, set to false for backward compatibility", + "name": "correct_transform_coords", + "option": "optional" + }, + { + "description": "bool (default false). If true, then boxes (rois and deltas) include angle info to handle rotation. The format will be [ctr_x, ctr_y, width, height, angle (in degrees)].", + "name": "rotated", + "option": "optional" + }, + { + "description": "bool (default true). 
If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_on", + "option": "optional" + }, + { + "description": "int (default -90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_lo", + "option": "optional" + }, + { + "description": "int (default 90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].", + "name": "angle_bound_hi", + "option": "optional" + }, + { + "description": "float (default 1.0 degrees). For RRPN, clip almost horizontal boxes within this threshold of tolerance for backward compatibility. Set to negative value for no clipping.", + "name": "clip_angle_thresh", + "option": "optional" + } + ], + "description": "\nTransform proposal bounding boxes to target bounding box using bounding box\n regression deltas.\n", + "inputs": [ + { + "description": "Bounding box proposals in pixel coordinates, Size (M, 4), format [x1, y1, x2, y2], orSize (M, 5), format [batch_index, x1, y1, x2, y2]. If proposals from multiple images in a batch are present, they should be grouped sequentially and in incremental order.For rotated boxes, this would have an additional angle (in degrees) in the format [, ctr_x, ctr_y, w, h, angle].", + "name": "rois" + }, + { + "description": "bounding box translations and scales,size (M, 4*K), format [dx, dy, dw, dh], K = # classes. For rotated boxes, size (M, 5*K, format [dx, dy, dw, dh, da].", + "name": "deltas" + }, + { + "description": "Image dimensions, size (batch_size, 3), format [img_height, img_width, img_scale]", + "name": "im_info" + } + ], + "outputs": [ + { + "description": "Pixel coordinates of the transformed bounding boxes,Size (M, 4*K), format [x1, y1, x2, y2]. 
For rotated boxes, size (M, 5*K), format [ctr_x, ctr_y, w, h, angle].", + "name": "box_out" + }, + { + "description": "Tensor of shape (batch_size) with each element denoting the number of RoIs belonging to the corresponding image in batch", + "name": "roi_batch_splits" + } + ], + "support_level": "default" + } + }, + { + "name": "BoxWithNMSLimit", + "schema": { + "attributes": [ + { + "description": "(float) TEST.SCORE_THRESH", + "name": "score_thresh", + "option": "optional" + }, + { + "description": "(float) TEST.NMS", + "name": "nms", + "option": "optional" + }, + { + "description": "(int) TEST.DEECTIONS_PER_IM", + "name": "detections_per_im", + "option": "optional" + }, + { + "description": "(bool) TEST.SOFT_NMS.ENABLED", + "name": "soft_nms_enabled", + "option": "optional" + }, + { + "description": "(string) TEST.SOFT_NMS.METHOD", + "name": "soft_nms_method", + "option": "optional" + }, + { + "description": "(float) TEST.SOFT_NMS.SIGMA", + "name": "soft_nms_sigma", + "option": "optional" + }, + { + "description": "(float) Lower bound on updated scores to discard boxes", + "name": "soft_nms_min_score_thres", + "option": "optional" + }, + { + "description": "bool (default false). If true, then boxes (rois and deltas) include angle info to handle rotation. The format will be [ctr_x, ctr_y, width, height, angle (in degrees)].", + "name": "rotated", + "option": "optional" + } + ], + "description": "\nApply NMS to each class (except background) and limit the number of\nreturned boxes.\n", + "inputs": [ + { + "description": "Scores, size (count, num_classes)", + "name": "scores" + }, + { + "description": "Bounding box for each class, size (count, num_classes * 4). For rotated boxes, this would have an additional angle (in degrees) in the format [, ctr_x, ctr_y, w, h, angle]. 
Size: (count, num_classes * 5).", + "name": "boxes" + }, + { + "description": "Tensor of shape (batch_size) with each element denoting the number of RoIs/boxes belonging to the corresponding image in batch. Sum should add up to total count of scores/boxes.", + "name": "batch_splits" + } + ], + "outputs": [ + { + "description": "Filtered scores, size (n)", + "name": "scores" + }, + { + "description": "Filtered boxes, size (n, 4). For rotated boxes, size (n, 5), format [ctr_x, ctr_y, w, h, angle].", + "name": "boxes" + }, + { + "description": "Class id for each filtered score/box, size (n)", + "name": "classes" + }, + { + "description": "Output batch splits for scores/boxes after applying NMS", + "name": "batch_splits" + }, + { + "description": "Optional filtered indices, size (n)", + "name": "keeps" + }, + { + "description": "Optional number of filtered indices per class, size (num_classes)", + "name": "keeps_size" + } + ], + "support_level": "default" + } + }, + { + "name": "ONNXWhile", + "schema": { + "attributes": [ + { + "description": "Net executed on each iteration", + "name": "body", + "option": "optional" + }, + { + "description": "Whether to use the trip count input", + "name": "has_trip_count", + "option": "optional" + }, + { + "description": "Whether to use the condition input", + "name": "has_cond", + "option": "optional" + }, + { + "description": "Whether to save the scopes across iterations, as in for backprop", + "name": "save_scopes", + "option": "optional" + }, + { + "description": "Do not create new scopes. Use this only if you're certain there will be no name collision, for example if you're converting from a fully-SSA IR", + "name": "disable_scopes", + "option": "optional" + } + ], + "description": "\n*** EXPERIMENTAL. This operator is a work-in-progress. No assumption should be\nmade about the stability or correctness of this op. ***\n\nGeneric Looping construct confirming to the ONNX Loop operator spec. 
This loop\nhas multiple termination conditions:\n\n1. Trip count. Iteration count specified at runtime. Set by specifying the\n input M. Optional. Set to empty string to omit. Note that a static trip\n count (specified at graph construction time) can be specified by passing\n in a constant node for input M.\n2. Loop termination condition. This is an input to the op that determines\n whether to run the first interation and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition\n variable, whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\nOperator inputs defined as (max_trip_count, condition_var). Omitted optional\ninputs are represented as empty string. Concretely, in this caffe2 op an input\nis marked as omitted by setting its 'has_{name}' argument to False.\n\n input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... // Note this value is ignored, but is required in the body\n }\n\n input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n ", + "inputs": [ + { + "description": "Number of iterations to go out to. Used if the flag has_trip_count is True.", + "name": "max_trip_count" + }, + { + "name": "condition" + }, + { + "name": "initial", + "option": "variadic" + }, + { + "description": "Dynamic condition value for the first iteration. For all subsequent iterations, the condition from the body graph is used. 
This input is used if the flag has_cond is true.", + "name": "first_iter_condition" + } + ], + "outputs": [ + { + "name": "final_and_scan_outputs", + "option": "variadic" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Quantize", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": null, + "inputs": [ + { + "description": "FP32 Tensor X.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Int8 Tensor qX representing X with linear quantization.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Conv", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "default": 0, + "name": "pad" + }, + { + "default": 1, + "name": "stride" + } + ], + "category": "Layer", + "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \n[Only NHWC order is supported now]Note that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is convolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. 
As a side note on the implementation layout:\nconv_op_impl.h is the templated implementation of the conv_op.h file, which is\nwhy they are separate files.\n", + "inputs": [ + { + "description": "Input data blob from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the NCHW usage. On the other hand, the NHWC Op has a different set of dimension constraints. ", + "name": "X" + }, + { + "description": "The filter blob that will be used in the convolutions; has size (M x C x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the convolution; has size (M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8FC", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "category": "Layer", + "description": "\nComputes the result of passing an input vector X into a fully\nconnected layer with 2D weight matrix W and 1D bias vector b. That is,\nthe layer computes Y = X * W^T + b, where X has size (M x K),\nW has size (N x K), b has size (N), and Y has size (M x N),\nwhere M is often the batch size.\n\n\nNOTE: X does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\nX \\in [a_0, a_1 * ... * a_{n-1}]. 
Only this case is supported!\nLastly, even though b is a 1D vector of size N, it is copied/resized to\nbe size (M x N) implicitly and added to each vector in the batch.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "inputs": [ + { + "description": "input tensor that's coerced into a 2D matrix of size (MxK) as described above", + "name": "X" + }, + { + "description": "A tensor that is coerced into a 2D blob of size (KxN) containing fully connected weight matrix", + "name": "W" + }, + { + "description": "1D blob containing bias vector", + "name": "b" + } + ], + "outputs": [ + { + "description": "2D output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8AveragePool", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "category": "Pool", + "description": "AveragePool \nconsumes an input blob X and applies average pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Average pooling consisting of averaging all values of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. 
The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from average pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Sum", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "Int8Softmax", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "(int) default to 1; describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size", + "name": "axis", + "option": "optional" + } + ], + "category": "Activation", + "description": "\nThe operator computes the softmax normalized values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). The output tensor has the same shape\nand contains the softmax normalized values of the corresponding input.\n\nX does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\nX \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then X will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. 
For the default\ncase where axis=1, this means the X tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input" + } + ], + "outputs": [ + { + "description": "The softmax normalized output values with the same shape as input tensor.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Relu", + "schema": { + "attributes": [ + { + "default": 0, + "name": "order" + }, + { + "default": 0, + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "default": 0, + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "category": "Activation", + "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "AffineChannel", + "schema": { + "category": "Normalization", + "description": "\nApplies a separate affine transformation to each channel of the input. 
Useful\nfor replacing spatial batch norm with its equivalent fixed transformation.\n", + "inputs": [ + { + "description": "Feature map input with order NCHW or NHWC.", + "name": "X" + }, + { + "description": "1D input of shape (C); the c-th element is the scale factor of the affine transformation for the c-th channel of the input.", + "name": "scale" + }, + { + "description": "1D input of shape (C); the c-th element is the bias of the affine transformation for the c-th channel of the input.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output with the same order of Input.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LearningRateAdaption", + "schema": { + "attributes": [ + { + "description": "the learning rate for performing gradient descent on learning rate lr", + "name": "lr_alpha", + "option": "optional" + }, + { + "description": "whether to apply normalized lr adaption or not", + "name": "normalized_lr_adaption", + "option": "optional" + } + ], + "description": "\n Learning Rate Adaption is an operation that perform one iteration of\n gradient descent based on learning rate:\n lr(k) = lr(k-1) - lr_alpha * df(k-1)/dlr,\n where df(k-1)/dlr is the gradient of objective function f on lr, and\n lr_alpha is a learning rate hyperparameter. It can be prove that\n df(k-1)/dlr equals INNERPRODUCT(grad(k-1), -grad(k-2)), where grad(k-1) is\n the grad of f(k-1) on parameters. When the argument\n \"normalized_lr_adaption\" is false, we simply perform the\n following update:\n lr(k) = lr(k-1) - lr_alpha * INNERPRODUCT(grad(k-1), grad(k-2)).\n If we set \"normalized_lr_adaption\" to be true, we do not directly apply\n INNERPRODUCT(grad(k-1), -grad(k-2)) as the grad. 
Instead, we perform the\n following update:\n lr(k) = lr(k-1) + lr_alpha * cosineSimilarity(grad(k-1), grad(k-2)).\n", + "inputs": [ + { + "description": "Learning rate", + "name": "lr" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "The effective grad", + "name": "effgrad" + } + ], + "outputs": [ + { + "description": "Updated learning rate", + "name": "output_lr" + } + ], + "support_level": "default" + } + }, + { + "name": "MeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CoshGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "IndexSize", + "schema": { + "description": "\nReturns the number of entries currently present in the index.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "Scalar int64 tensor with number of entries.", + "name": "items" + } + ], + "support_level": "default" + } + }, + { + "name": "LpPool", + "schema": { + "attributes": [ + { + "description": "(*float*): type of $L_p$ norm to use (default=2.0)", + "name": "p", + "option": "optional" + }, + { + "description": "(*int*): the size of the window to take a max over", + "name": "kernel", + "option": "optional" + }, + { + "description": "(*int*): the stride of the window", + "name": "stride", + "option": "optional" + }, + { + "description": "(*int*): implicit zero padding to be added on both sides", + "name": "pad", + "option": "optional" + }, + { + "description": "(*int*): parameter that controls the stride of elements in the window", + "name": "dilation", + "option": "optional" + }, + { + "description": "(*string*): order of blob dimensions (default=\"NCHW\")", + "name": "order", + "option": "optional" + } + ], + "description": "\n`LpPool` consumes an input blob and applies max pooling across the the blob according to kernel sizes, stride sizes, pad lengths and 
dilation. $L_p$ pooling consists of taking the $L_p$ norm of a subset of the input tensor according to the kernel size and downsampling the data into the output blob for further processing.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the output blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lp_pool_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LpPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n p=2.0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[[-1.1113514 -1.1173418 -0.1504435 0.1327146 -1.2221841 -0.5654315 ]\n [-1.9209646 -0.04675794 0.8604731 1.2042469 0.28154245 0.38656202]\n [-0.8772837 -0.03264008 0.26222762 0.28526652 0.321102 -2.5891325 ]\n [-0.9248281 1.440776 -0.56832 -0.6017927 1.2262512 -2.1443934 ]\n [ 0.5194415 -1.6858683 0.45221648 0.65029615 -0.8574544 0.8121054 ]\n [ 0.25902653 0.4934758 0.49870652 -0.48134378 -0.9178449 -0.07626943]]]]\n\nY:\n [[[[2.4851248 1.49361 1.4290358]\n [1.9240153 0.9139378 3.5928857]\n [1.8500228 1.0525136 1.4976646]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsMeanFused8BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsMean, but\noperating on 8-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 4-byte scale and 4-byte bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Transpose", + "schema": { + "attributes": [ + { + "description": "Order to permute axes of input tensor. Reverses the dimensions by default.", + "name": "axes", + "option": "optional" + } + ], + "description": "\nTranspose the input tensor by permuting the axes of the input according\nto the `axes` argument. Similar to numpy's\n[transpose](https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html)\nfunction.\n\nFor example, when axes=(1, 0, 2), given an input tensor of shape\n(1, 2, 3), the output shape will be (2, 1, 3).\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/transpose_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Transpose\",\n [\"X\"],\n [\"Y\"],\n axes=(0,3,1,2)\n)\n\nx = np.random.rand(1,32,32,3)\nworkspace.FeedBlob(\"X\", x)\nprint(\"X.shape (NHWC order):\", workspace.FetchBlob(\"X\").shape)\nworkspace.RunOperatorOnce(op)\nprint(\"Y.shape (NCHW order):\", workspace.FetchBlob(\"Y\").shape)\n```\n\n**Result**\n\n```\nX.shape (NHWC order): (1, 32, 32, 3)\nY.shape (NCHW order): (1, 3, 32, 32)\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Transposed output.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Accuracy", + "schema": { + "attributes": [ + { + "description": "Count as correct by comparing the true label to the top k scoring classes (default 1: only compare to the top scoring class i.e. argmax)", + "name": "top_k", + "option": "optional" + } + ], + "description": "\nAccuracy takes two inputs- predictions and labels, and returns a float\naccuracy value for the batch. Predictions are expected in the form of 2-D tensor\ncontaining a batch of scores for various classes, and labels are expected in the\n form of 1-D tensor containing true label indices of samples in the batch. If\nthe score for the label index in the predictions is the highest among all\nclasses, it is considered a correct prediction.\n", + "inputs": [ + { + "description": "2-D tensor (Tensor) of size (num_batches x num_classes) containing scores", + "name": "predictions" + }, + { + "description": "1-D tensor (Tensor) of size (num_batches) having the indices of true labels", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D tensor (Tensor) of size 1 containing accuracy", + "name": "accuracy" + } + ], + "support_level": "default" + } + }, + { + "name": "TimerEnd", + "schema": { + "description": "\nStop a timer started with **TimerBegin**. Publishes a CAFFE_EVENT.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): pointer to a timer object; obtained from **TimerBegin** op", + "name": "timer" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsRangeFill", + "schema": { + "description": "\nThe *LengthsRangeFill* op takes a single input *lengths* and outputs a single tensor *range_sequence*. 
For each element of *lengths*, the op appends the range(0,lengths) vector to the end of *range_sequence*. For example, if input=[2,4,1], the output would be [0,1,0,1,2,3,0].\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsRangeFill\",\n [\"lengths\"],\n [\"range_sequence\"],\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([2,4,1]).astype(np.int32))\nprint(\"lengths:\\n\", workspace.FetchBlob(\"lengths\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"range_sequence: \\n\", workspace.FetchBlob(\"range_sequence\"))\n\n```\n\n**Result**\n\n```\n\nlengths:\n [2 4 1]\nrange_sequence:\n [0 1 0 1 2 3 0]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1D tensor of int32 or int64 segment lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1D tensor whose size is the sum of *lengths*", + "name": "range_sequence" + } + ], + "support_level": "default" + } + }, + { + "name": "AccumulateHistogram", + "schema": { + "attributes": [ + { + "description": "the lower bound value", + "name": "lower_bound", + "option": "optional" + }, + { + "description": "the upper bound value", + "name": "upper_bound", + "option": "optional" + }, + { + "description": "number of buckets to use in [lower_bound, upper_bound)", + "name": "num_buckets", + "option": "optional" + } + ], + "description": "\nThis operator calculate thes histogram of values in input tensor.\nThere're 2 outputs, one for histogram of current input tensor, and another\nfor histogram of the all input tensors accumulated through history.\nThe output would contain num_buckets + 2 values. index[1 ... num_buckets]\nfor values in [lower_bound, upper_bound) interval. And the rest 2 for values\nsmaller than lower_bound or greater than upper_bound respectively.\n", + "inputs": [ + { + "description": "Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output histogram of the current tensor.", + "name": "CurHist" + }, + { + "description": "Accumulated histogram of the history tensor.", + "name": "AccHist" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8ConvRelu", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. 
\n[Only NHWC order is supported now]Note that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is convolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_op_impl.h is the templated implementation of the conv_op.h file, which is\nwhy they are separate files.\n", + "inputs": [ + { + "description": "Input data blob from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the NCHW usage. On the other hand, the NHWC Op has a different set of dimension constraints. ", + "name": "X" + }, + { + "description": "The filter blob that will be used in the convolutions; has size (M x C x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the convolution; has size (M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths. Output will go through rectified linear function, where y = max(0, x).", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSum8BitsRowwise", + "schema": { + "description": "\nVariation of SparseLengthsSum operator, where DATA is\nstored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator). 
To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseUnsortedSegmentMean", + "schema": { + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Mean' to each segment. Segments ids can appear in arbitrary order (unlike in\nSparseSortedSegmentMean).\n\nThis op is basically Gather and UnsortedSegmentMean fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nMean computes the element-wise mean of the input slices. 
Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Integer vector with the same length as INDICES that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Load", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.", + "name": "absolute_path", + "option": "optional", + "type": "int64" + }, + { + "default": "", + "description": "Blobs will be prefixed with this when loading. Useful for avoiding collisions with blobs existing in the workspace. The output blob names specified to this op should include this prefix.", + "name": "add_prefix", + "option": "optional", + "type": "string" + }, + { + "default": "", + "description": "Characters in the provided blob names that match `strip_prefix` will be removed prior to saving. Also, characters that precede `strip_prefix` will be removed. Useful for removing device scope from blob names.", + "name": "strip_prefix", + "option": "optional", + "type": "string" + }, + { + "description": "The output path of the db. See the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "db", + "option": "optional", + "type": "string" + }, + { + "description": "List of paths to dbs to load blobs from. 
See the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "dbs", + "option": "optional", + "type": "string[]" + }, + { + "description": "(type: string)* Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").", + "name": "db_type", + "option": "optional" + }, + { + "default": 0, + "description": "If nonzero, the blobs are loaded into the device that is specified in the serialized `BlobProto`. Otherwise, the device will be set as the one that the `Load` operator is being run under.", + "name": "keep_device", + "option": "optional", + "type": "int64" + }, + { + "default": 0, + "description": "If nonzero, will load all blobs pointed to by the db to the workspace overwriting/creating blobs as needed.", + "name": "load_all", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "If True, will allow not loading all the output blobs specified in the outputs.", + "name": "allow_incomplete", + "option": "optional", + "type": "boolean" + }, + { + "description": "If set, used instead of output blob names to specify which blobs in the db shall be loaded. Must be the same length as number of output blobs.", + "name": "source_blob_names", + "option": "optional", + "type": "string[]" + } + ], + "description": "\nThe Load operator loads a set of serialized blobs from a db or multiple dbs. It\ntakes $[0, \\infty)$ number of inputs and $[0, \\infty)$ number of outputs, using\nthe db keys to match the db entries with the outputs.\n\nIf at least one input is passed, then it is assumed that that input blobs are a\nset of DBReaders to load from. Otherwise the `db` or `dbs` argument is used to load\nblobs from one single db or multiple dbs respectively. `db_type` argument is used\nto specify the type of the input db/dbs.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Load\",\n [],\n [\"X\", \"Y\"],\n db=\"test_db\",\n db_type=\"lmdb\"\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: List(DBReader))* [OPTIONAL] List of DBReaders to load from. Can use this instead of the `db`/`dbs` args.", + "name": "X, Y, ..." + } + ], + "support_level": "default" + } + }, + { + "name": "Exp", + "schema": { + "description": "\nCalculates the exponential of the given input tensor ($exp(x)$), element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/exp_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Exp\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[0.5821691 0.07719802 0.50159824]\n [0.40952456 0.36788362 0.84887683]\n [0.02472685 0.65730894 0.9066397 ]]\nX after running op:\n[[1.7899168 1.080256 1.6513585]\n [1.5061016 1.4446739 2.3370204]\n [1.0250351 1.9295927 2.4759884]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* The exponential of the input tensor computed element-wise.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ConvTransposeGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LayerNormGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SinhGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "FlattenToVec", + "schema": { + "description": "\n\nThe *FlattenToVec* op flattens the input tensor into a 1-D vector. The op accepts a single input tensor and returns a single output tensor.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FlattenToVec\",\n [\"input\"],\n [\"output\"],\n)\n\nworkspace.FeedBlob(\"input\", np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output: \\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [[ 1. 2. 3.]\n [ 4. 5. 6.]\n [ 7. 8. 9.]\n [10. 11. 12.]]\noutput:\n [ 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "A tensor of rank >= 1.", + "name": "input" + } + ], + "outputs": [ + { + "description": "A tensor of rank 1 (vector) with the contents of the input tensor.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Ftrl", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "UnsortedSegmentMean", + "schema": { + "attributes": [ + { + "description": "Optional int argument specifying the number of output segments and thus the first dimension of the output", + "name": "num_segments", + "option": "optional" + } + ], + "description": "\nApplies 'Mean' to each segment of input tensor. Segments ids can appear in\narbitrary order (unlike in SortedSegmentMean).\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector with the same length as the first dimension of DATA that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. 
Has the first dimension of equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Sinh", + "schema": { + "description": "\nCalculates the hyperbolic sine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sinh_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sinh\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.98907769 0.52907848 0.03216429 0.94983935 0.47881418]\nY: [1.15841695 0.5541099 0.03216984 1.09924557 0.49732079]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The hyperbolic sine values of the input tensor, computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "CloseRebatchingQueue", + "schema": { + "description": "\nCloses the Queue.\n", + "inputs": [ + { + "description": "object representing the queue", + "name": "queue" + } + ], + "support_level": "default" + } + }, + { + "name": "LpPoolGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "StumpFunc", + "schema": { + "description": "\nConverts each input element into either high_ or low_value\nbased on the given threshold.\n", + "inputs": [ + { + "description": "tensor of float", + "name": "X" + } + ], + "outputs": [ + { + "description": "tensor of float", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BooleanMask", + "schema": { + "description": "\nGiven a 1D `data` tensor and a boolean `mask` tensor of the same shape, returns a `masked_data` tensor containing only the elements corresponding to positions where the `mask` is True, and a `masked_indices` tensor containing the indices of the True elements.\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_mask_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanMask\",\n [\"data\", \"mask\"],\n [\"masked_data\", \"masked_indices\"]\n)\n\nworkspace.FeedBlob(\"data\", np.array([1,2,3,4,5,6]))\nworkspace.FeedBlob(\"mask\", np.array([True,False,False,True,True,False]))\nprint(\"data:\", workspace.FetchBlob(\"data\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\nworkspace.RunOperatorOnce(op)\nprint(\"masked_data:\", workspace.FetchBlob(\"masked_data\"))\nprint(\"masked_indices:\", workspace.FetchBlob(\"masked_indices\"))\n\n```\n\n**Result**\n\n```\n\ndata: [1 2 3 4 5 6]\nmask: [ True False False True True False]\nmasked_data: [1 4 5]\nmasked_indices: [0 3 4]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): 1D input tensor", + "name": "data" + }, + { + "description": "(*Tensor``*): tensor of bools which determines the input elements that will be left in the `masked_data` output tensor; same shape as `data`", + "name": "mask" + } + ], + "outputs": [ + { + "description": "(*Tensor*): 1D tensor of same type as `data` input that contains the masked input tensor", + "name": "masked_data" + }, + { + "description": "(*Tensor``*): 1D tensor of indices of the True elements in the `mask` tensor", + "name": "masked_indices" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceFrontMean", + "schema": { + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "description": "\nReduces the input tensor along the last dimension of the by applying **mean**.\n\nCan reduce more than one of the \"first\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the mean operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [mean(1,4), mean(5,1,7), mean(2), mean(9,2)] = [2.5, 4.333, 2, 5.5]$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_mean_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceFrontMean\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[5. 0. 9.]\n [4. 1. 1.]\n [9. 0. 8.]]\n\n [[2. 6. 7.]\n [6. 2. 6.]\n [0. 4. 5.]]]\nY: [4.3333335 2.1666667 6.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SigmoidCrossEntropyWithLogits", + "schema": { + "attributes": [ + { + "description": "default is false; if enabled, will use the log d trick to avoid the vanishing\ngradients early on; see Goodfellow et. al (2014)", + "name": "log_D_trick", + "option": "optional" + }, + { + "description": "default is false; if enabled, the model will be allowed to train on an unjoined\ndataset, where some examples might be false negative and might appear\nin the dataset later as (true) positive example.", + "name": "unjoined_lr_loss", + "option": "optional" + } + ], + "description": "\nGiven two matrices logits and targets, of same shape,\n(batch_size, num_classes), computes the sigmoid cross entropy between the two.\nReturns a tensor of shape (batch_size,) of losses for each example.\n", + "inputs": [ + { + "description": "matrix of logits for each example and class.", + "name": "logits" + }, + { + "description": "matrix of targets, same shape as logits.", + "name": "targets" + } + ], + "outputs": [ + { + "description": "Vector with the total xentropy for each example.", + "name": "xentropy" + } + ], + "support_level": "default" + } + }, + { + "name": "CosineEmbeddingCriterionGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ResizeLike", + "schema": { + "description": "\nProduces tensor containing data of first input and shape of second input.\n", + "inputs": [ + { + "description": "Tensor whose data will be copied into the output.", + "name": "data" + }, + { + "description": "Tensor whose shape will be applied to output.", + "name": "shape_tensor" + } + ], + "outputs": [ + { + "description": "Tensor 
with data of input 0 and shape of input 1.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "HSoftmaxSearch", + "schema": { + "attributes": [ + { + "description": "Serialized TreeProto string containing a tree including all intermidate nodes and leafs. All nodes must have names for correct outputs", + "name": "tree", + "option": "optional" + }, + { + "description": "beam used for pruning tree. The pruning algorithm is that only children, whose score is smaller than parent's score puls beam, will be propagated.", + "name": "beam", + "option": "optional" + }, + { + "description": "Number of nodes in outputs", + "name": "topN", + "option": "optional" + } + ], + "description": "\nHSoftmaxSearch is an operator to generate the most possible paths given a\nwell-trained model and input vector. Greedy algorithm is used for pruning the\nsearch tree.\n", + "inputs": [ + { + "description": "Input data from previous layer", + "name": "X" + }, + { + "description": "The matrix trained from Softmax Ops", + "name": "W" + }, + { + "description": "The bias trained from Softmax Ops", + "name": "b" + } + ], + "outputs": [ + { + "description": "The name of selected nodes and leafs. For nodes, it will be the name defined in the tree. 
For leafs, it will be the index of the word in the tree.", + "name": "Y_names" + }, + { + "description": "The corresponding scores of Y_names", + "name": "Y_scores" + } + ], + "support_level": "default" + } + }, + { + "name": "HSoftmaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CloneCommonWorld", + "schema": { + "description": "\nClones existing common world.\n", + "inputs": [ + { + "description": "Existing common world to clone.", + "name": "existing_comm_world" + } + ], + "outputs": [ + { + "description": "A common world for collective operations.", + "name": "comm_world" + } + ], + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LCGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SubGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "PackedInt8BGRANHWCToNCHWCStylizerPreprocess", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ConcatBatchMatMulBatchGatherOp", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Gather", + "schema": { + "description": "\n\nThe *Gather* op accepts a *DATA* tensor of rank $r >= 1$ and *INDICES* tensor of rank $q$ as inputs. It then gathers entries of the outer-most dimension of *DATA*, indexed by *INDICES*, and concatenate them in an output tensor of rank $q + (r - 1)$.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/gather_op.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/gather_op.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Gather\",\n [\"DATA\", \"INDICES\"],\n [\"OUTPUT\"]\n)\ndata = np.array([[1., 1.2],[2.3, 3.4],[4.5, 5.7]])\nprint(\"DATA:\\n\",data)\n\ninds = np.array([[0, 1],[1, 2]])\nprint(\"INDICES:\\n\",inds)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"DATA\", data.astype(np.float32))\nworkspace.FeedBlob(\"INDICES\", inds.astype(np.int32))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT:\\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [[1. 1.2]\n [2.3 3.4]\n [4.5 5.7]]\nINDICES:\n [[0 1]\n [1 2]]\nOUTPUT:\n [[[1. 1.2]\n [2.3 3.4]]\n\n [[2.3 3.4]\n [4.5 5.7]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input data tensor of rank $r>=1$", + "name": "DATA" + }, + { + "description": "Input indices tensor of rank $q$. This tensor must contain integers.", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "Output tensor of rank $q+(r-1)$", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "KeyValueToMap", + "schema": { + "description": "Convert key and value blob pairs into a map blob", + "inputs": [ + { + "description": "Blob reference to the key", + "name": "key blob" + }, + { + "description": "Blob reference to the value", + "name": "value blob" + } + ], + "outputs": [ + { + "description": "Blob reference to the map", + "name": "map blob" + } + ], + "support_level": "default" + } + }, + { + "name": "Unique", + "schema": { + "description": "\nDeduplicates input indices vector and optionally produces reverse remapping.\nThere's no guarantees on the ordering of the output indices.\n", + "inputs": [ + { + "description": "1D tensor of int32 or int64 indices.", + "name": "indices" + } + ], + "outputs": [ + { + "description": "1D tensor of deduped entries.", + "name": "unique_indices" + }, + { + "description": "(optional) mapping from `indices` to `unique_indices`. This has the same shape as `indices`. Its elements are the indices into `unique_indices` such that `Gather(['unique_indices', 'remapping'])` yields `indices`.", + "name": "remapping" + } + ], + "support_level": "default" + } + }, + { + "name": "ResizeNearestGradient", + "schema": { + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "AveragePut", + "schema": { + "attributes": [ + { + "description": "(*str*): name of the stat. 
If not present, then uses name of input blob", + "name": "name", + "option": "optional" + }, + { + "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers", + "name": "magnitude_expand", + "option": "optional" + }, + { + "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed", + "name": "bound", + "option": "optional" + }, + { + "description": "(*float*): Optionally provide a default value for receiving empty tensors", + "name": "default_value", + "option": "optional" + } + ], + "description": "\n Consume a value and pushes it to the global stat registry as an average.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): A scalar tensor, representing any numeric value", + "name": "value" + } + ], + "support_level": "default" + } + }, + { + "name": "SoftmaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "BatchBucketize", + "schema": { + "description": "\nBucketize the float_features into sparse features.\nThe float_features is a N * D tensor where N is the batch_size, and D is the feature_dim.\nThe indices is a 1D tensor containing the indices of the features that need to be bucketized.\nThe lengths is a 1D tensor that splits the following 'boundaries' argument.\nThe boundaries is a 1D tensor containing the border list for each feature.\n\nWith in each batch, `indices` should not have duplicate number,\nand the number of elements in `indices` should be less than or equal to `D`.\nEach element in `lengths` vector (lengths[`i`]) represents\nthe number of boundaries in the sub border list.\nThe sum of all elements in `lengths` must be equal to the size of `boundaries`.\nIf lengths[0] = 2, the first sub border list is [0.5, 1.0], which separate the\nvalue to (-inf, 0.5], (0,5, 1.0], (1.0, inf). 
The bucketized feature will have\nthree possible values (i.e. 0, 1, 2).\n\n\nFor example, with input:\n\n float_features = [[1.42, 2.07, 3.19, 0.55, 4.32],\n [4.57, 2.30, 0.84, 4.48, 3.09],\n [0.89, 0.26, 2.41, 0.47, 1.05],\n [0.03, 2.97, 2.43, 4.36, 3.11],\n [2.74, 5.77, 0.90, 2.63, 0.38]]\n indices = [0, 1, 4]\n lengths = [2, 3, 1]\n boundaries = [0.5, 1.0, 1.5, 2.5, 3.5, 2.5]\n\nThe output is:\n\n output =[[2, 1, 1],\n [2, 1, 1],\n [1, 0, 0],\n [0, 2, 1],\n [2, 3, 0]]\n\nafter running this operator.\n", + "inputs": [ + { + "description": "2-D dense tensor, the second dimension must be greater or equal to the indices dimension", + "name": "float_features" + }, + { + "description": "Flatten tensor, containing the indices of `float_features` to be bucketized. The datatype must be int32.", + "name": "indices" + }, + { + "description": "Flatten tensor, the size must be equal to that of `indices`. The datatype must be int32.", + "name": "lengths" + }, + { + "description": "Flatten tensor, dimension has to match the sum of lengths", + "name": "boundaries" + } + ], + "outputs": [ + { + "description": "2-D dense tensor, with 1st dim = float_features.dim(0), 2nd dim = size(indices)in the arg list, the tensor is of the same data type as `feature`.", + "name": "bucktized_feat" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateTensorVector", + "schema": { + "description": "Create a std::unique_ptr >", + "support_level": "default" + } + }, + { + "name": "UnsortedSegmentSum", + "schema": { + "attributes": [ + { + "description": "Optional int argument specifying the number of output segments and thus the first dimension of the output", + "name": "num_segments", + "option": "optional" + } + ], + "description": "\nApplies 'Sum' to each segment of input tensor. Segments ids can appear in\narbitrary order (unlike in SortedSegmentSum).\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). 
Values belonging to the same segment are\naggregated together.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector with the same length as the first dimension of DATA that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateTreeCursor", + "schema": { + "attributes": [ + { + "description": "A list of strings each one representing a field of the dataset.", + "name": "fields", + "option": "optional" + } + ], + "description": "\nCreates a cursor to iterate through a list of tensors, where some of those\ntensors contain the lengths in a nested schema. 
The schema is determined by\nthe `fields` arguments.\n\nFor example, to represent the following schema:\n\n Struct(\n a=Int(),\n b=List(List(Int)),\n c=List(\n Struct(\n c1=String,\n c2=List(Int),\n ),\n ),\n )\n\nthe field list will be:\n [\n \"a\",\n \"b:lengths\",\n \"b:values:lengths\",\n \"b:values:values\",\n \"c:lengths\",\n \"c:c1\",\n \"c:c2:lengths\",\n \"c:c2:values\",\n ]\n\nAnd for the following instance of the struct:\n\n Struct(\n a=3,\n b=[[4, 5], [6, 7, 8], [], [9]],\n c=[\n Struct(c1='alex', c2=[10, 11]),\n Struct(c1='bob', c2=[12]),\n ],\n )\n\nThe values of the fields will be:\n {\n \"a\": [3],\n \"b:lengths\": [4],\n \"b:values:lengths\": [2, 3, 0, 1],\n \"b:values:values\": [4, 5, 6, 7, 8, 9],\n \"c:lengths\": [2],\n \"c:c1\": [\"alex\", \"bob\"],\n \"c:c2:lengths\": [2, 1],\n \"c:c2:values\", [10, 11, 12],\n }\n\nIn general, every field name in the format \"{prefix}:lengths\" defines a domain\n\"{prefix}\", and every subsequent field in the format \"{prefix}:{field}\" will\nbe in that domain, and the length of the domain is provided for each entry of\nthe parent domain. In the example, \"b:lengths\" defines a domain of length 4, so\nevery field under domain \"b\" will have 4 entries.\nThe \"lengths\" field for a given domain must appear before any reference to\nthat domain.\n\nReturns a pointer to an instance of the Cursor, which keeps the current offset\non each of the domains defined by `fields`. 
Cursor also ensures thread-safety\nsuch that ReadNextBatch and ResetCursor can be used safely in parallel.\n\nA cursor does not contain data per se, so calls to ReadNextBatch actually need\nto pass a list of blobs containing the data to read for each one of the fields.\n", + "outputs": [ + { + "description": "A blob pointing to an instance of a new TreeCursor.", + "name": "cursor" + } + ], + "support_level": "default" + } + }, + { + "name": "UnsortedSegmentWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "HasElements", + "schema": { + "description": "\nThe *HasElements* op accepts a single or multiple input tensors, and produces a single boolean output $has\\_elements$. The output is *True* if and only if any of the input tensor has size > 0. Note, this op is the opposite of the *IsEmpty* op.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"HasElements\",\n [\"tensor\"],\n [\"has_elements\"],\n)\n\n// Use a not-empty tensor\nworkspace.FeedBlob(\"tensor\", np.random.randn(2, 2).astype(np.float32))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"has_elements: \", workspace.FetchBlob(\"has_elements\"),\"\\n\")\n\n// Use an empty tensor\nworkspace.FeedBlob(\"tensor\", np.empty(0))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"has_elements: \", workspace.FetchBlob(\"has_elements\"))\n\n```\n\n**Result**\n\n```\n\ntensor:\n [[ 0.6116506 -0.54433197]\n [ 0.19406661 -0.7338629 ]]\nhas_elements: True\n\ntensor:\n []\nhas_elements: False\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input data tensor to check for elements.", + "name": "tensor" + }, + { + "description": "List of input data tensors to check for elements.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "Output scalar boolean tensor. True if input has size > 0.", + "name": "has_elements" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8ConvTranspose", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\nThe transposed convolution consumes an input vector, the filter blob, and\nthe bias blob, and computes the output. Note that other parameters, such as\nthe stride and kernel size, or the pads' sizes in each direction are not\nnecessary for input because they are provided by the\nConvTransposeUnpoolOpBase operator. Various dimension checks are done\nimplicitly, and the sizes are specified in the Input docs for this operator.\nAs is expected, the filter is deconvolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_transpose_op_impl.h is the templated implementation of the\nconv_transpose_op.h file, which is why they are separate files.\n ", + "inputs": [ + { + "description": "Input data blob from previous layer; has size (N x H x W x C), where N is the batch size, C is the number of channels, and H and W are the height and width. 
Note that NHWC is supported now", + "name": "X" + }, + { + "description": "The filter blob that will be used in the transposed convolution; has size (M x kH x kW x C), where C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the convolution;has size (C). Optional, if not passed, will treat it as all 0.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the transposed convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LastNWindowCollector", + "schema": { + "attributes": [ + { + "description": "The number of random samples to append for each positive samples", + "name": "num_to_collect", + "option": "optional" + } + ], + "description": "\nCollect the last N rows from input data. The purpose is to keep track of data\naccross batches, so for example suppose the LastNWindowCollector is called\nsuccessively with the following input data\n\n [1, 2, 3, 4]\n [5, 6, 7]\n [8, 9, 10, 11]\n\nAnd the number of items is set to 6, then the output after the 3rd call\nwill contain the following elements:\n\n [6, 7, 8, 9, 10, 11]\n\nNo guarantee is made on the ordering of elements in input. So a valid value for\noutput could have been\n\n [11, 10, 9, 8, 7, 6]\n\nAlso, this method works for any order tensor, treating the first dimension as\ninput rows and keeping the last N rows seen as input. So for instance:\n\n [[1, 2], [2, 3], [3, 4], [4, 5]]\n [[5, 6], [6, 7], [7, 8]]\n [[8, 9], [9, 10], [10, 11], [11, 12]]\n\nA possible output would be\n\n [[6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12]]\n\nThis is not thread safe unless a mutex is given.\n", + "inputs": [ + { + "description": "The buffer for last-N record. 
Should be initialized to empty tensor", + "name": "last-N buffer" + }, + { + "description": "The cursor pointing to the next position that should be replaced. Should be initialized to 0.", + "name": "next cursor" + }, + { + "description": "tensor to collect from", + "name": "DATA" + }, + { + "description": "(optional) mutex to use to make this thread-safe", + "name": "MUTEX" + }, + { + "description": "", + "name": "NUM_VISITED" + } + ], + "outputs": [ + { + "description": "Data stored in sessions", + "name": "last-N buffer" + }, + { + "description": "Updated input cursor", + "name": "next cursor" + }, + { + "description": "number of records seen so far", + "name": "NUM_VISITED" + } + ], + "support_level": "default" + } + }, + { + "name": "Bucketize", + "schema": { + "attributes": [ + { + "description": "bucketization boundaries", + "name": "boundaries", + "option": "optional" + } + ], + "description": "\nThis operator works as bucketize in tensorflow and digitize\nin numpy. It bucketizes the input 'X' based on argument 'boundaries'.\nFor each value x in input 'data', the operator returns index i given\nboundaries[i-1] < x <= boundaries[i].\nIf values in 'data' are beyond the bounds of boundaries, 0 or\nlen(boundaries) is returned as appropriate.\nThe boundaries need to be monotonically increasing.\nFor example\n\nIf data = [2, 4, 1] and boundaries = [0.1, 2.5], then\n\noutput = [1, 2, 1]\n\nIf data = [[2, 3], [4, 1], [2, 5]] and boundaries = [0.1, 2.5], then\n\noutput = [[1, 2], [2, 1], [1, 2]]\n\n", + "inputs": [ + { + "description": "input tensor", + "name": "data" + } + ], + "outputs": [ + { + "description": "indices of bins given by boundaries to which each valuein data belongs", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "HSoftmax", + "schema": { + "attributes": [ + { + "description": "Serialized HierarchyProto string containing list of vocabulary words and their paths from root of hierarchy to the leaf", + "name": 
"hierarchy", + "option": "optional" + } + ], + "description": "\nHierarchical softmax is an operator which approximates the softmax operator\nwhile giving significant training speed gains and reasonably comparable\nperformance. In this operator, instead of calculating the probabilities of all\nthe classes, we calculate the probability of each step in the path from root to\nthe target word in the hierarchy.\n\nThe operator takes a 2-D tensor (Tensor) containing a batch of layers, a\nset of parameters represented by the weight matrix and bias terms, and a 1-D\ntensor (Tensor) holding labels, or the indices of the target class. The\nhierarchy has to be specified as an argument to the operator.\n\nThe operator returns a 1-D tensor holding the computed log probability of the\ntarget class and a 2-D tensor of intermediate outputs (from the weight matrix\nand softmax from each step in the path from root to target class) which will be\nused by the gradient operator to compute gradients for all samples in the batch.\n", + "inputs": [ + { + "description": "Input data from previous layer", + "name": "X" + }, + { + "description": "2D blob containing 'stacked' fully connected weight matrices. Each node in the hierarchy contributes one FC weight matrix if it has children nodes. Dimension is N*D, D is input dimension of data (X), N is sum of all output dimensions, or total number of nodes (excl root)", + "name": "W" + }, + { + "description": "1D blob with N parameters", + "name": "b" + }, + { + "description": "int word_id of the target word", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D of log probability outputs, one per sample", + "name": "Y" + }, + { + "description": "Extra blob to store the intermediate FC and softmax outputs for each node in the hierarchical path of a word. 
The outputs from samples are stored in consecutive blocks in the forward pass and are used in reverse order in the backward gradientOp pass", + "name": "intermediate_output" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceFrontWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SpatialSoftmaxWithLoss", + "schema": { + "description": "\nCombined Spatial Softmax and Cross-Entropy loss operator.\nSimilar to SoftmaxWithLoss, this operator computes the spatial softmax\nnormalized values for each layer in the batch of the given input, after which\ncross-entropy loss is computed. This operator is numerically more stable than\nseparate Softmax and CrossEntropy ops. The inputs are a 2-D tensor\n(Tensor) of size (batch_size x input_feature_dimensions) and tensor of\nlabels (ground truth).\nOutput is tensor with the probability for each label in a pixel for each example\n(N x D x W x H) and averaged loss (scalar).\nFor spatial softmax, weighting is by x,y position of the input.\n", + "inputs": [ + { + "description": "Unscaled log probabilities", + "name": "logits" + }, + { + "description": "Ground truth", + "name": "labels" + }, + { + "description": "Optional blob to be used to weight the samples for the loss. With spatial set, weighting is by x,y of the input", + "name": "weight_tensor" + } + ], + "outputs": [ + { + "description": "Tensor with softmax cross entropy loss", + "name": "softmax" + }, + { + "description": "Average loss", + "name": "loss" + } + ], + "support_level": "default" + } + }, + { + "name": "SafeDequeueBlobs", + "schema": { + "attributes": [ + { + "description": "(default 1) If > 1, multiple records will be dequeued and tensors for each column will be concatenated. This requires all tensors in the records to be at least 1D, and to have the same inner dimensions.", + "name": "num_records", + "option": "optional" + } + ], + "description": "\nDequeue the blobs from queue. 
When the queue is closed and empty, the output\nstatus will be set to true which can be used as exit criteria for execution\nstep.\nThe 1st input is the queue and the last output is the status. The rest are\ndata blobs.\n", + "inputs": [ + { + "description": "The shared pointer for the BlobsQueue", + "name": "queue" + } + ], + "outputs": [ + { + "description": "The blob to store the dequeued data", + "name": "blob" + }, + { + "description": "Is set to 0/1 depending on the success of dequeue", + "name": "status" + } + ], + "support_level": "default" + } + }, + { + "name": "Copy", + "schema": { + "description": "\nCopy input tensor into output, potentially across devices.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/copy_op.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/copy_op.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Copy\",\n [\"input\"],\n [\"output\"]\n)\n\nworkspace.FeedBlob(\"input\", np.random.rand(3,3))\nprint(\"input:\", workspace.FetchBlob(\"input\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output:\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n[[0.16826761 0.68168217 0.55196001]\n [0.19735483 0.34837823 0.69015595]\n [0.09448514 0.57390828 0.37097193]]\noutput:\n[[0.16826761 0.68168217 0.55196001]\n [0.19735483 0.34837823 0.69015595]\n [0.09448514 0.57390828 0.37097193]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): input tensor to copy", + "name": "input" + } + ], + "outputs": [ + { + "description": "(*Tensor*): copy of input tensor", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragePool2DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeLogMeanExp", + "schema": { + "description": "\nApplies 'LogMeanExp' to each segment of input tensor. In order to allow for more\nefficient implementation of 'LogMeanExp', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nLogMeanExp computes the element-wise log of the mean of exponentials of input slices. 
Operation doesn't change the shape of individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimentsions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeMultiListFeatureTensors", + "schema": { + "description": "Merge given multi-feature tensors with list features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".keys", + "name": "in1_keys" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.values", + "name": "in1_values_values" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values.lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.values", + "name": "out_values_values" + } + ], + 
"support_level": "default" + } + }, + { + "name": "CountUp", + "schema": { + "description": "\nIncreases count value by 1 and outputs the previous value atomically.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n// Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n// Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n// Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n// Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n// Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' 
after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: int)* Count value BEFORE this operation.", + "name": "previous_count" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeSingleScalarFeatureTensorsGradient", + "schema": { + "description": "Explode multi-feature tensor of scalar features into one or moresingle-feature tensors\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".presence", + "name": "in1_presence" + }, + { + "description": ".values_grad", + "name": ".values_grad" + } + ], + "outputs": [ + { + "description": "_grad of inputs", + "name": "in1_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "GFtrl", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Acos", + "schema": { + "description": "\nCalculates the arccosine of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arccosine of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SoftsignGradient", + "schema": { + "description": "\nCalculates 
the softsign gradient (sgn(x)/(1+|x|)^2) of the given input tensor\nelement-wise.\n", + "inputs": [ + { + "description": "1-D input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The softsign gradient (sgn(x)/(1+|x|)^2) values of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeDim", + "schema": { + "description": "\nMerge first two dimensions in a single dimension with size dim(0) * dim(1).\n", + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor.", + "name": "reshaped" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8ResizeNearest", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + }, + { + "description": "Output dimensions (HxW). If specified this takes precedence over scale values.", + "name": "output_size", + "option": "optional" + } + ], + "description": "\nResizes the spatial dimensions of the input using nearest neighbor\ninterpolation. 
The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(output_height * height_scale)\n", + "inputs": [ + { + "description": "Input Int8 tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output Int8 tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LC", + "schema": { + "description": "\nThe locally connected operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. 
As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "description": null, + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "AcosGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "IndexHash", + "schema": { + "attributes": [ + { + "description": "seed for the hash function", + "name": "seed", + "option": "optional" + }, + { + "description": "must be > 0, hashed ids will be modulo this number", + "name": "modulo", + "option": "optional" + } + ], + "description": "\nThis operator translates a list of indices into a list of hashed indices.\nA seed can be fed as an argument to change the behavior of the hash function.\nIf a modulo is specified, all the hashed indices will be modulo the\nspecified number. 
All input and output indices are enforced to be positive.\n", + "inputs": [ + { + "description": "Input feature indices.", + "name": "Indices" + } + ], + "outputs": [ + { + "description": "Hashed feature indices.", + "name": "HashedIndices" + } + ], + "support_level": "default" + } + }, + { + "name": "GroupNorm", + "schema": { + "attributes": [ + { + "description": "(int) default 32; number of groups used by GN.", + "name": "num_groups", + "option": "optional" + }, + { + "description": "(float) default 1e-5; small constant added to var.", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\nGroup Normalization (GN) operation: https://arxiv.org/abs/1803.08494\n", + "inputs": [ + { + "description": ">=4D feature map input of shape (N, C, H, W) or (N, C, T, H, W)", + "name": "X" + }, + { + "description": "The scale as a 1-dimensional tensor of size C to be applied to the output.", + "name": "gamma" + }, + { + "description": "The bias as a 1-dimensional tensor of size C to be applied to the output.", + "name": "beta" + } + ], + "outputs": [ + { + "description": "The output >=4-dimensional tensor of the same shape as X.", + "name": "Y" + }, + { + "description": "The mean of shape (N, G). For backward usage or reference. Cannot be used as activations.", + "name": "mean" + }, + { + "description": "The std of shape (N, G). For backward usage or reference. 
Cannot be used as activations.", + "name": "std" + } + ], + "support_level": "default" + } + }, + { + "name": "ChannelShuffleGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "DestroyCommonWorld", + "schema": { + "description": "Closes all connections managed by a common world.", + "inputs": [ + { + "description": "The common world to be destroyed.", + "name": "common_world" + } + ], + "support_level": "default" + } + }, + { + "name": "Floor", + "schema": { + "description": "\nElement-wise application of the floor function ($y=floor(x)$) to the input\ntensor `X`. Output tensor shape is the same as the input tensor. This\noperator can be used in an in-place fashion by using the same input blob as the\noutput blob.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/floor_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Floor\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.uniform(-10, 10, (5,5))).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[ 3.813361 -1.319647 5.2089314 -4.931328 0.6218652 ]\n [ 7.2757645 5.5552588 5.785643 -2.4790506 -0.41400087]\n [ 1.1541046 -6.933266 3.3754056 1.6569928 -1.7670316 ]\n [-3.4932013 4.891472 1.5530115 -3.2443287 -4.605099 ]\n [-4.574543 -7.360948 5.91305 -8.196495 -5.357458 ]]\nX after running op:\n[[ 3. -2. 5. -5. 0.]\n [ 7. 5. 5. -3. -1.]\n [ 1. -7. 3. 1. -2.]\n [-4. 4. 1. -4. -5.]\n [-5. -8. 5. -9. -6.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsPartition", + "schema": { + "attributes": [ + { + "description": "(int, default 0) If set, the operator transforms the first tensor values as floor(X_ij / num_partitions)", + "name": "pack_first_input", + "option": "optional" + } + ], + "description": "\nLengthsPartition splits the input int tensor into multiple ones according to the\nsecond tensor. The first dimension is expected to be the tensor that describes\nlengths of the elements.\n\nTakes the second input and partitions it to shards according to the remainder of\nvalues modulo the number of partitions. It requires the second tensor to be\na 1D-tensor of the integral type. The first tensor should be 1D-tensor of int32\nthat would represent the lengths of the elements in the input. The number of\npartitions is derived as (num_output / num_input).\n\nIf additional inputs are present they must have the same shape as the first\ninput, optionally with extra trailing dimensions. They will be partitioned\naccordingly to the first input.\n\nOptional arg 'pack_first_input' transforms the first tensor values as\nX_ij / num_partitions.\n\nOutputs are ordered as\nX_0_part_0, X_1_part_0, ..., X_N-1_part_0, X_0_part_1, ..., X_N-1_part_K-1\n", + "inputs": [ + { + "description": "Input tensor containing data to be partitioned. The number of input tensors might be greater than 1 but must have the same shape as the previous tensors.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output Partitions. 
The number of output tensors has to be a multiple of the number of input tensors.", + "name": "partitions" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateCounter", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Initial count for the counter, must be >= 0.", + "name": "init_count", + "option": "optional", + "type": "int64" + } + ], + "description": "\nCreates a count-down counter with initial value specified by the `init_count`\nargument.\n\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n// Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n// Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n// Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n// Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n// Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' 
after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "outputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a new counter.", + "name": "counter" + } + ], + "support_level": "default" + } + }, + { + "name": "MarginRankingCriterion", + "schema": { + "attributes": [ + { + "description": "The margin value as a float. Default is 1.0.", + "name": "margin", + "option": "optional" + } + ], + "description": "\nMarginRankingCriterion takes two input data X1 (Tensor),\nX2 (Tensor), and label Y (Tensor) to produce the\nloss (Tensor) where the loss function,\nloss(X1, X2, Y) = max(0, -Y * (X1 - X2) + margin), is applied to\nthe tensor elementwise.\n\nIf y == 1 then it assumed the first input should be ranked higher\n(have a larger value) than the second input, and vice-versa for\ny == -1.\n", + "inputs": [ + { + "description": "The left input vector as a 1-dim TensorCPU.", + "name": "X1" + }, + { + "description": "The right input vector as a 1-dim TensorCPU.", + "name": "X2" + }, + { + "description": "The label as a 1-dim TensorCPU with int value of 1 or -1.", + "name": "Y" + } + ], + "outputs": [ + { + "description": "The output loss with the same dimensionality as X1.", + "name": "loss" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeIdLists", + "schema": { + "description": "\nMergeIdLists: Merge multiple ID_LISTs into a single ID_LIST.\n\nAn ID_LIST is a list of IDs (may be ints, often longs) that represents a single\nfeature. As described in https://caffe2.ai/docs/sparse-operations.html, a batch\nof ID_LIST examples is represented as a pair of lengths and values where the\n`lengths` (int32) segment the `values` or ids (int32/int64) into examples.\n\nGiven multiple inputs of the form lengths_0, values_0, lengths_1, values_1, ...\nwhich correspond to lengths and values of ID_LISTs of different features, this\noperator produces a merged ID_LIST that combines the ID_LIST features. 
The\nfinal merged output is described by a lengths and values vector.\n\nWARNING: The merge makes no guarantee about the relative order of ID_LISTs\nwithin a batch. This can be an issue if ID_LIST are order sensitive.\n", + "inputs": [ + { + "description": "Lengths of the ID_LISTs batch for first feature", + "name": "lengths_0" + }, + { + "description": "Values of the ID_LISTs batch for first feature", + "name": "values_0" + } + ], + "outputs": [ + { + "description": "Lengths of the merged ID_LISTs batch", + "name": "merged_lengths" + }, + { + "description": "Values of the merged ID_LISTs batch", + "name": "merged_values" + } + ], + "support_level": "default" + } + }, + { + "name": "SumElements", + "schema": { + "attributes": [ + { + "description": "(*bool*): set to True to compute the average of the elements rather than the sum", + "name": "average", + "option": "optional" + } + ], + "description": "\nSums the elements of the input tensor. Tensor type must be float32.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nsum_op = core.CreateOperator(\n \"SumElements\",\n [\"X\"],\n [\"Y\"]\n)\n\navg_op = core.CreateOperator(\n \"SumElements\",\n [\"X\"],\n [\"Y\"],\n average=True\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(sum_op)\nprint(\"Y (sum_op):\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(avg_op)\nprint(\"Y (avg_op):\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[7. 2. 5.]\n [9. 4. 2.]\n [1. 2. 5.]]\nY (sum_op): 37.0\nY (avg_op): 4.111111\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): blob pointing to an instance of a counter", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): Scalar tensor containing the sum (or average)", + "name": "sum" + } + ], + "support_level": "default" + } + }, + { + "name": "ThresholdedReluGradient", + "schema": { + "description": "\nThresholdedReluGradient takes both Y and dY and uses this to update dX\naccording to the chain rule and derivatives of the rectified linear function.\n", + "support_level": "default" + } + }, + { + "name": "GivenTensorInt64Fill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseLengthsSum", + "schema": { + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Sum' to each segment. Segments are defined by their LENGTHS.\n\nThis op is basically Gather and LengthsSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nLENGTHS is a vector that defines slice sizes by first dimension of DATA. Values\nbelonging to the same segment are aggregated together. 
sum(LENGTHS) has\nto match INDICES size.\n\nThe first dimension of the output is equal to the number of input segment,\ni.e. `len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Non negative vector with sum of elements equal to INDICES length", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "CountDown", + "schema": { + "description": "\nIf the internal count value > 0, decreases count value by 1 and outputs False,\notherwise outputs True.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n// Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n// Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n// Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n// Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n// Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' 
after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: bool)* False unless the internal count is zero.", + "name": "done" + } + ], + "support_level": "default" + } + }, + { + "name": "ReciprocalGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Sqr", + "schema": { + "description": "\nPerforms element-wise squaring ($x^2$) of input tensor.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sqr_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sqr\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[4. 6. 2.]\n [0. 1. 6.]\n [9. 2. 7.]]\nY:\n[[16. 36. 4.]\n [ 0. 1. 36.]\n [81. 4. 49.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "StoreWait", + "schema": { + "attributes": [ + { + "description": "names of the blobs to wait for (optional)", + "name": "blob_names", + "option": "optional" + } + ], + "description": "\nWait for the specified blob names to be set. The blob names can be passed\neither as an input blob with blob names or as an argument.\n", + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + }, + { + "description": "names of the blobs to wait for (optional)", + "name": "names" + } + ], + "support_level": "default" + } + }, + { + "name": "ColwiseMax", + "schema": { + "description": "\nCompute column-wise max reduction of the input tensor. This op takes one input, $X$, of shape $BxMxN$, where $B$ is the batch size, $M$ is number of rows, and $N$ is number of columns. The output of this op, $Y$, is a matrix of shape $BxN$, with one row for each element of the batch, and the same number of columns as the input tensor.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ColwiseMax\",\n [\"X\"],\n [\"Y\"]\n)\n\n// Create X, simulating a batch of 2, 4x4 matricies\nX = np.random.randint(0,high=20,size=(2,4,4))\nprint(\"X:\\n\",X)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[17 15 2 6]\n [ 8 12 6 0]\n [ 6 9 7 3]\n [ 4 13 16 13]]\n\n [[ 0 3 4 12]\n [18 1 17 12]\n [ 7 17 13 14]\n [12 17 2 1]]]\nY:\n [[17. 15. 16. 13.]\n [18. 17. 17. 14.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "A tensor of dimensions $B x M x N$ to compute columnwise-max. Here, $B$ is batch size, and $M$ and $N$ are the number of rows and columns of each element of the batch, respectively.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output tensor of shape $B x N$, where each row represents the column-wise maximums for that element of the input batch.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LogFatal", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "StringIndexCreate", + "schema": { + "attributes": [ + { + "description": "Max number of elements, including the zero entry.", + "name": "max_elements", + "option": "optional" + } + ], + "description": "\nCreates a dictionary that maps string keys to consecutive integers\nfrom 1 to max_elements. Zero is reserved for unknown keys.\n", + "outputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "support_level": "default" + } + }, + { + "name": "CopyRowsToTensor", + "schema": { + "description": "\n This operator takes in a 2d tensor, a list of indices, and a 1d tensor\n with the same width of the 2d tensor. It will replace the rows in 2d\n tensor specified in indices with the 2d tensor. 
The operator does an\n in-place change to the input tensor.\n Example:\n INPUT_TENSOR = [[1, 2], [3, 4], [5, 6]]\n INDICES = [1]\n ROW = [9, 0]\n OUTPUT_TENSOR = [[1, 2], [9, 0], [5, 6]]\n ", + "inputs": [ + { + "description": "Input tensor needs to be modified.", + "name": "input_tensor" + }, + { + "description": "Indices of rows need to be copied", + "name": "indices" + }, + { + "description": "1-d tensor that is going to replace the rows", + "name": "row" + } + ], + "outputs": [ + { + "description": "updated tensor", + "name": "output_tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "MakeTwoClass", + "schema": { + "description": "\nGiven a vector of probabilities, this operator transforms this into a 2-column\n matrix with complimentary probabilities for binary classification. In explicit\n terms, given the vector X, the output Y is vstack(1 - X, X).\n ", + "inputs": [ + { + "description": "Input vector of probabilities", + "name": "X" + } + ], + "outputs": [ + { + "description": "2-column matrix with complimentary probabilities of X for binary classification", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Snapshot", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "NegateGradient", + "schema": { + "description": "\nNegagteGradient operator in forward pass simply copies input to the\noutput, and in backward pass, flips the sign of the output gradient\n", + "support_level": "default" + } + }, + { + "name": "Not", + "schema": { + "description": "\nPerforms element-wise negation on input tensor `X`.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n\"Not\",\n[\"X\"],\n[\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3, 3) > 0.5))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[ True False False]\n[False False False]\n[ True True True]]\nY:\n[[False True True]\n[ True True True]\n[False False False]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(Tensor``)* Negated output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "PrependDim", + "schema": { + "attributes": [ + { + "description": "Size of the dimension to prepend.", + "name": "dim_size", + "option": "optional" + } + ], + "description": "\nReshape the tensor by prepending a dimension of fixed size and dividing the\nsize of the next dimension by that amount.\n", + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor.", + "name": "reshaped" + } + ], + "support_level": "default" + } + }, + { + "name": "SendTensor", + "schema": { + "attributes": [ + { + "description": "The rank to send the tensor to.", + "name": "dst", + "option": "optional" + }, + { + "description": "(int) a tag to send the tensor with.", + "name": "tag", + "option": "optional" + }, + { + "description": "(bool) if set, only send the content and assume that the receiver has already known the tensor's shape and information.", + "name": "raw_buffer", + "option": "optional" + } + ], + "description": "\nSends the tensor to another node.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be allgathered.", + "name": "X" + }, + { + "description": "An int CPUtensor of size 1 specifying the rank. If given, this overrides the 'to' argument of the op.", + "name": "dst" + }, + { + "description": "An int CPUtensor of size 1 specifying the tag to send the tensor with. 
This overrides the 'tag' argument of the op.", + "name": "tag" + } + ], + "support_level": "default" + } + }, + { + "name": "InferenceLSTM", + "schema": { + "attributes": [ + { + "description": "(*long*): number of layers in the lstm stack", + "name": "num_layers", + "option": "optional" + }, + { + "description": "(*bool*): whether the cells have biases or not", + "name": "has_biases", + "option": "optional" + }, + { + "description": "(*bool*): whether the batch is at dim 0", + "name": "batch_first", + "option": "optional" + }, + { + "description": "(*bool*): if bidirectional", + "name": "bidirectional", + "option": "optional" + } + ], + "description": null, + "outputs": [ + { + "description": "the output of the last layer of lstm", + "name": "output" + }, + { + "description": "hidden state at t = seq_len", + "name": "hidden" + }, + { + "description": "cell state at t = seq_len", + "name": "cell" + } + ], + "support_level": "default" + } + }, + { + "name": "SumElementsInt", + "schema": { + "description": "Sums the integer elements of the input tensor.", + "inputs": [ + { + "description": "Tensor to sum up", + "name": "X" + } + ], + "outputs": [ + { + "description": "Scalar sum", + "name": "sum" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseAdagrad", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nGiven inputs (param, moment, indices, grad, lr), runs the dense AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": 
"Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment_1" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8AveragePoolRelu", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "AveragePool \nconsumes an input blob X and applies average pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Average pooling consisting of averaging all values of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from average pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Output will go through rectified linear function, where y = max(0, x).", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SegmentIdsToRanges", + "schema": { + "description": "\nTransfers a vector of segment ids to a vector of segment ranges. This operation\nsupports non-consecutive segment ids. Segments not appearing in the input vector\nwill have length 0. If the second input is provided, the number of segments =\nthe size of its first dimension. 
Otherwise, the number of segments = the last\nindex in the first input vector + 1.\n", + "inputs": [ + { + "description": "1-D int32_t or int64_t tensor of segment ids", + "name": "segment_ids" + }, + { + "description": "if provided, number of segments = the size of its first dimension", + "name": "data (optional)" + } + ], + "outputs": [ + { + "description": "1-D int64_t tensor of segment lengths", + "name": "lengths" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateCommonWorld", + "schema": { + "attributes": [ + { + "description": "(int) size of the common world.", + "name": "size", + "option": "optional" + }, + { + "description": "(int) rank of this node in the common world.", + "name": "rank", + "option": "optional" + } + ], + "description": "\nCreates a common world for communication operators.\n", + "inputs": [ + { + "description": "Key/value handler for rendezvous (optional).", + "name": "kv_handler" + } + ], + "outputs": [ + { + "description": "A common world for collective operations.", + "name": "comm_world" + } + ], + "support_level": "default" + } + }, + { + "name": "SliceGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseUnsortedSegmentWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "DBExists", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. 
If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.", + "name": "absolute_path", + "option": "optional", + "type": "int64" + }, + { + "description": "Path to the db in question; see the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "db_name", + "option": "optional", + "type": "string" + }, + { + "description": "Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").", + "name": "db_type", + "option": "optional", + "type": "string" + } + ], + "description": "\nChecks if the db described by the arguments exists.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"DBExists\",\n [],\n [\"exists\"],\n db_name=\"test_db\",\n db_type=\"leveldb\",\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"exists:\", workspace.FetchBlob(\"exists\"))\n\n```\n\n
\n\n", + "outputs": [ + { + "description": "*(type: Tensor``)* Scalar boolean output tensor. True if the db exists, else false.", + "name": "exists" + } + ], + "support_level": "default" + } + }, + { + "name": "ReceiveTensor", + "schema": { + "attributes": [ + { + "description": "(int) he rank to receive the tensor from.", + "name": "src", + "option": "optional" + }, + { + "description": "(int) a tag to receive the tensor with.", + "name": "tag", + "option": "optional" + }, + { + "description": "(bool) if set, only send the content and assume that the receiver has already known the tensor's shape and information.", + "name": "raw_buffer", + "option": "optional" + } + ], + "description": "\nReceives the tensor from another node.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "In-place output. If raw_buffer is specified, Y should have pre-allocated data and type..", + "name": "Y" + }, + { + "description": "An int CPUtensor of size 1 specifying the rank. If given, this overrides the 'from' argument of the op.", + "name": "src" + }, + { + "description": "An int CPUtensor of size 1 specifying the tag to send the tensor with. 
This overrides the 'tag' argument of the op.", + "name": "tag" + } + ], + "outputs": [ + { + "description": "The received tensor.", + "name": "Y" + }, + { + "description": "The sender that sent the message as a CPUTensor of size 1 and of type int.", + "name": "src" + }, + { + "description": "The tag that the message is sent with as a CPUTensor of size 1 and of type int.", + "name": "tag" + } + ], + "support_level": "default" + } + }, + { + "name": "SquaredL2DistanceGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Swish", + "schema": { + "description": "\nSwish takes one input data (Tensor) and produces one output data\n(Tensor) where the swish function, y = x / (1 + exp(-x)), is applied to the\ntensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchGatherGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LearningRate", + "schema": { + "attributes": [ + { + "description": "(float, required) base learning rate", + "name": "base_lr", + "option": "optional" + }, + { + "description": "(float, default 1.0) strategy for gamma enforcement", + "name": "policy", + "option": "optional" + }, + { + "description": "(float, default 1.0) used only for inv policy type", + "name": "power", + "option": "optional" + }, + { + "description": "(float, default 1.0) momentum of change", + "name": "gamma", + "option": "optional" + }, + { + "description": "(float, default 1.0) sampling rate on iterations", + "name": "stepsize", + "option": "optional" + }, + { + "description": "(boolean, default True) in alter policy", + "name": "active_first", + "option": "optional" + }, + { + "description": "(int64_t, required) in alter policy", + "name": "active_period", + "option": "optional" + }, + { + "description": 
"(int64_t, required) in alter policy", + "name": "inactive_period", + "option": "optional" + }, + { + "description": "(int, default -1) maximum iterations in this training run", + "name": "max_iter", + "option": "optional" + }, + { + "description": "(int, default 0) number of iterations over which to warmup lr", + "name": "num_iter", + "option": "optional" + }, + { + "description": "(float, default 0) starting multiplier for learning rate", + "name": "start_multiplier", + "option": "optional" + }, + { + "description": "(float, default 0) end multiplier for learning rate", + "name": "end_multiplier", + "option": "optional" + }, + { + "description": "(float, default 0.5) constant multiplier for learning rate", + "name": "multiplier", + "option": "optional" + }, + { + "description": "(float, default 1) start multiplier for learning rate", + "name": "multiplier_1", + "option": "optional" + }, + { + "description": "(float, default 1) end multiplier for learning rate", + "name": "multiplier_2", + "option": "optional" + }, + { + "description": "(int array, default empty) number of iterations for each sub learning rate policy in composite policy", + "name": "sub_policy_num_iters", + "option": "optional" + }, + { + "description": "", + "name": "m1", + "option": "optional" + }, + { + "description": "", + "name": "n1", + "option": "optional" + }, + { + "description": "", + "name": "m2", + "option": "optional" + }, + { + "description": "", + "name": "n2", + "option": "optional" + }, + { + "description": "", + "name": "m3", + "option": "optional" + }, + { + "description": "(float, default 0.005) max learning rate", + "name": "max_lr", + "option": "optional" + }, + { + "description": "defaults to 0.1", + "name": "start_warmup_multiplier", + "option": "optional" + }, + { + "description": "defaults to 10000000", + "name": "constant_warmup_num_iter", + "option": "optional" + }, + { + "description": "defaults to 10000000", + "name": "linear_warmup_num_iter", + "option": "optional" + 
}, + { + "description": "defaults to 0.05, part of CompositeCyclicalLRPolicy", + "name": "cyclical_max_lr", + "option": "optional" + }, + { + "description": "defaults to 1000000, part of CompositeCyclicalLRPolicy", + "name": "cyclical_step_size", + "option": "optional" + }, + { + "description": "defaults to 0.999, part of CompositeCyclicalLRPolicy", + "name": "cyclical_decay", + "option": "optional" + }, + { + "description": "defaults to 0.01, part of CompositeCosineLRPolicy", + "name": "cosine_min_lr", + "option": "optional" + }, + { + "description": "defaults to 0.05, part of CompositeCosineLRPolicy", + "name": "cosine_max_lr", + "option": "optional" + }, + { + "description": "defaults to 50, part of CompositeCosineLRPolicy", + "name": "cosine_period", + "option": "optional" + }, + { + "description": "defaults to 1,0, part of CompositeCosineLRPolicy", + "name": "cosine_t_mult", + "option": "optional" + }, + { + "description": "defaults to 0.99, part of CompositeCosineLRPolicy", + "name": "cosine_lr_shrink", + "option": "optional" + } + ], + "description": "\nLearning rate is a decreasing function of time. With low learning rates the\nimprovements will be linear. With high learning rates they will start to look\nmore exponential. 
Learning rate is controlled by the following arguments:\n\n\nRequired:\n `iterations`\n `base_lr`: base learning rate\n `policy`: this controls how the learning rate is applied, options are:\n `fixed`\n `step`: uses `stepsize`, `gamma`\n `exp`: uses `gamma`\n `gate`: uses 'multiplier_1', 'multiplier_2', `num_iter``\n `inv`: uses `gamma`, `power`\n `linearWarmup`: uses `start_multiplier`, `num_iter`\n `constantWarmup`: uses `multiplier`, `num_iter`\n `alter`: uses `active_first`, `active_period`, `inactive_period`\n `hill`: uses those in both `linearWarmup` and `inv`, plus `end_multiplier`\n `composite`: uses `sub_policy_num_iters` and additional args with format\n `cyclic`: uses `max_lr`, `stepsize`\n `cosine`: uses `min_lr`, `max_lr`, `period`, `t_mult`, `lr_shrink`\n `constantThenLinearWarmup`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`\n `compositeCyclical`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`, `cyclical_max_lr`, `cyclical_step_size`, `cyclical_decay`\n `compositeCosine`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`, `cosine_max_lr`, `cosine_period`, `cosine_t_mult`, `cosine_lr_shrink`\n sub_policy_{sub_policy_index}_{sub_policy_arg}, for example:\n sub_policy_0_policy: \"exp\", sub_policy_0_gamma: 0.99,\n sub_policy_0_lr_scale: 1.2\n sub_policy_0_policy: \"fixed\", sub_policy_0_lr_scale: 1.0\n sub_policy_num_iters: [1000, 1000]\n\nOptional:\n `stepsize`: defaults to 0\n `max_lr`: defaults to 0.005\n `gamma`: defaults to 0\n `power`: defaults to 0\n `num_iter`: defaults to 0\n `start_multiplier`: defaults to 0\n `multiplier`: defaults to 0.5\n `multiplier_1`: defaults to 1\n `multiplier_2`: defaults to 1\n `m1`: defaults to 0.5, the first piece lr of piece warmup\n `n1`: defaults to 0, iter threshold of the first piece lr\n `m2`: defaults to 0.5, the second piece lr of piece warmup\n `n2`: defaults to 0, iter threshold of the second 
piece lr\n `m3`: defaults to 0.5, the third piece lr of piece warmup\n `start_warmup_multiplier`: defaults to 0.1, part of constantThenLinearWarmup\n `constant_warmup_num_iter`: defaults to 10000000, part of constantThenLinearWarmup and constantThenLinearWarmup\n `linear_warmup_num_iter`: defaults to 10000000, part of constantThenLinearWarmup, CompositeCyclicalLRPolicy, CompositeCosineLRPolicy\n `cyclical_max_lr`: defaults to 0.05, part of CompositeCyclicalLRPolicy\n `cyclical_step_size`: defaults to 1000000, part of CompositeCyclicalLRPolicy\n `cyclical_decay`: defaults to 1.0, part of CompositeCyclicalLRPolicy\n `cosine_min_lr`:defaults to 0.01, part of CompositeCosineLRPolicy\n `cosine_max_lr`:defaults to 0.05, part of CompositeCosineLRPolicy\n `cosine_period`:defaults to 50, part of CompositeCosineLRPolicy\n `cosine_t_mult`:defaults to 1.0, part of CompositeCosineLRPolicy\n `cosine_lr_shrink`:defaults to 0.99, part of CompositeCosineLRPolicy\n\nUsage:\n train_net.LearningRate(*iterations*, \"*label*\", base_lr=*float*,\n policy=\"policy_name\", stepsize=*int*, gamma=*float*)\n\n\nExample usage:\n train_net.LearningRate(200, \"LR\", base_lr=-0.1,\n policy=\"step\", stepsize=20, gamma=0.9)\n", + "inputs": [ + { + "description": "description needed", + "name": "input" + } + ], + "outputs": [ + { + "description": "description needed", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "PReluGradient", + "schema": { + "description": "\n\nPReluGradient takes both Y and dY and uses this to update dX and dW according\nto the chain rule and derivatives of the rectified linear function.\n\n", + "support_level": "default" + } + }, + { + "name": "LengthsMean", + "schema": { + "description": "\nApplies 'Mean' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. 
Values belonging to the same segment are\naggregated together and considered for the 'Mean' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n\n\nThe *LengthsMean* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the mean value in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [mean([2,4]), mean([3,1,2]), mean([10])] = [3,2,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsMean\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 3. 2. 10.]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Conv3DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CheckCounterDone", + "schema": { + "description": "\nIf the internal count value <= 0, outputs true, otherwise outputs false.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n// Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n// Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n// Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n// Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n// Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' 
after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: bool)* True if the internal count is zero or negative, otherwise False.", + "name": "done" + } + ], + "support_level": "default" + } + }, + { + "name": "LabelCrossEntropyGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Adam", + "schema": { + "attributes": [ + { + "description": "Default 0.9", + "name": "beta1", + "option": "optional" + }, + { + "description": "Default 0.999", + "name": "beta2", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nComputes the Adam update (https://arxiv.org/abs/1412.6980) for an\ninput gradient and momentum parameters. Concretely, given inputs\n(param, m1, m2, grad, lr, iters),\n\n t = iters + 1\n correction_multiplier = sqrt(1 - power(beta2, t)) /\n (1 - power(beta1, t))\n m1_o = (beta1 * m1) + (1 - beta1) * grad\n m2_o = (beta2 * m2) + (1 - beta2) * np.square(grad)\n grad_o = correction_multiplier * m1_o / \\\n (sqrt(m2_o) + epsilon)\n param_o = param + lr * grad_o\n\nand returns (param_o, m1_o, m2_o, grad_o), in which grad_o is an optional output\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "First moment history", + "name": "moment_1" + }, + { + "description": "Second moment history", + "name": "moment_2" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "iteration number", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated first moment", + "name": "output_moment_1" + }, + { + "description": "Updated second moment", + "name": "output_moment_2" + }, + { + 
"description": "Optional Effective gradient", + "name": "output_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "FCGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "IsNaN", + "schema": { + "description": "Returns a new tensor with boolean elements representing if each element is NaN or not.", + "inputs": [ + { + "description": "Tensor to check for nan", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "Tensor containing a 1 at each location of NaN elements.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsWeightedSumWithMainInputGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceBackSum", + "schema": { + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "description": "\nReduces the input tensor along the last dimension of the by applying **sum**.\n\nCan reduce more than one of the \"last\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [sum(1,5), sum(4,1,8), sum(2)] = [6, 13, 2]$\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceBackSum\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[2. 7. 7.]\n [1. 1. 0.]\n [9. 7. 2.]]\n\n [[6. 6. 4.]\n [1. 2. 6.]\n [6. 6. 3.]]]]\nY: [[36. 40.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LogitGradient", + "schema": { + "attributes": [ + { + "description": "small positive epsilon value, the default is 1e-6.", + "name": "eps", + "option": "optional" + } + ], + "description": null, + "inputs": [ + { + "description": "input float tensor", + "name": "X" + }, + { + "description": "input float tensor", + "name": "dY" + } + ], + "outputs": [ + { + "description": "output float tensor", + "name": "dX" + } + ], + "support_level": "default" + } + }, + { + "name": "RowwiseMax", + "schema": { + "description": "\nCompute row-wise max reduction of the input tensor. This op takes one input, $X$, of shape $BxMxN$, where $B$ is the batch size, $M$ is number of rows, and $N$ is number of columns. The output of this op, $Y$, is a matrix of shape $BxM$, with one row for each element of the batch, and the same number of columns as the number of rows of the input tensor.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"RowwiseMax\",\n [\"X\"],\n [\"Y\"]\n)\n\n// Create X, simulating a batch of 2, 4x4 matricies\nX = np.random.randint(0,high=20,size=(2,4,4))\nprint(\"X:\\n\",X)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[ 5 12 10 1]\n [ 4 16 2 15]\n [ 5 11 12 15]\n [15 4 17 19]]\n\n [[16 5 5 13]\n [17 2 1 17]\n [18 3 19 5]\n [14 16 10 16]]]\nY:\n [[12. 16. 15. 19.]\n [16. 17. 19. 16.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "A tensor of dimensions $B x M x N$ to compute rowwise-max. Here, $B$ is batch size, and $M$ and $N$ are the number of rows and columns of each element of the batch, respectively.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output tensor of shape $B x M$, where each row represents the row-wise maximums for that element of the input batch.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Fail", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SortedSegmentMean", + "schema": { + "description": "\nApplies 'Mean' to each segment of input tensor. Segments need to be sorted and\ncontiguous. See also UnsortedSegmentMean that doesn't have this requirement.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. 
Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceFrontMaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "RemoveDataBlocks", + "schema": { + "description": "\nShrink the data tensor by removing data blocks with given zero-based indices in\nthe outermost dimension of the tensor. Indices are not assumed in any order or\nunique but with the range [0, blocks_size). Indices could be empty.\n ", + "inputs": [ + { + "description": "a N-D data tensor, N >= 1", + "name": "data" + }, + { + "description": "zero-based indices of blocks to be removed", + "name": "indices" + } + ], + "outputs": [ + { + "description": "data after removing data blocks indexed by 'indices'", + "name": "shrunk data" + } + ], + "support_level": "default" + } + }, + { + "name": "SwapBestPath", + "schema": { + "description": "\nGiven a sequence of indices and a matrix, enforce that these indices have the\nbest columnwise scores\nscore\n", + "inputs": [ + { + "description": "N*D predictions matrix", + "name": "predictions" + }, + { + "description": "N*1 vector holds the best path indices ", + "name": "bestPath" + } + ], + "outputs": [ + { + "description": "N*D updated predictions matrix", + "name": "new_predictions" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "StringEndsWith", + "schema": { + "attributes": [ + { + "description": "The suffix to check input strings against.", + "name": "suffix", + "option": "optional" + } + ], + "description": "\nPerforms the ends-with check on each string in the input tensor.\nReturns tensor of boolean of the same dimension of input.\n", + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + 
"description": "Tensor of bools of same shape as input.", + "name": "bools" + } + ], + "support_level": "default" + } + }, + { + "name": "While", + "schema": { + "attributes": [ + { + "description": "Net executed on each iteration", + "name": "loop_net", + "option": "optional" + }, + { + "description": "Net to (re)compute condition value", + "name": "cond_net", + "option": "optional" + } + ], + "description": "\n'While' control operator, first input is a scalar boolean blob that stores loop's\ncondition value. Accepts 'loop_net' (required) and 'cond_net' (optional) arguments for\nloop's body and condition subnets respectively. If condition subnet is specified,\nit is executed before the first and after each iteration. Subnets are executed in\nthe same workspace as 'While'.\n ", + "inputs": [ + { + "description": "Scalar boolean condition", + "name": "condition" + } + ], + "support_level": "default" + } + }, + { + "name": "Range", + "schema": { + "description": "\nGenerates an output tensor within the half-open interval $[start, stop)$ (the interval including start but excluding stop).\n- The `start` input is optional, and defaults to 0 when not set.\n- The `step` input is optional, and defaults to 1 when not set.\n- The type of the `output` tensor is determined by the types of inputs used.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Range\",\n [\"start\", \"stop\", \"step\"],\n [\"output\"]\n)\n\nworkspace.FeedBlob(\"start\", np.array(4, dtype=np.int32))\nworkspace.FeedBlob(\"stop\", np.array(17, dtype=np.int32))\nworkspace.FeedBlob(\"step\", np.array(2, dtype=np.int32))\nprint(\"start:\", workspace.FetchBlob(\"start\"))\nprint(\"stop:\", workspace.FetchBlob(\"stop\"))\nprint(\"step:\", workspace.FetchBlob(\"step\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output:\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\nstart: 4\nstop: 17\nstep: 2\noutput: [ 4 6 8 10 12 14 16]\n\n```\n\n
\n ", + "inputs": [ + { + "description": "(*Tensor*): [OPTIONAL] scalar or 1-element tensor containing the start of the interval (inclusive) (default=0)", + "name": "start" + }, + { + "description": "(*Tensor*): scalar or 1-element tensor containing the end of the interval (exclusive)", + "name": "stop" + }, + { + "description": "(*Tensor*): [OPTIONAL] scalar or 1-element tensor specifying the spacing between values (default=1)", + "name": "step" + } + ], + "outputs": [ + { + "description": "(*Tensor*): 1D tensor of same type as inputs that contains the sequence", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8AddRelu", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\n Performs element-wise binary Add (with no broadcast support). \"\n \"Output will go through rectified linear \"\n \"function, where y = max(0, x).\n", + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "Second operand. It should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "SquareRootDivide", + "schema": { + "description": "\nGiven DATA tensor with first dimension N and SCALE vector of the same size N\nproduces an output tensor with same dimensions as DATA. Which consists of DATA\nslices. i-th slice is divided by sqrt(SCALE[i]) elementwise. 
If SCALE[i] == 0\noutput slice is identical to the input one (no scaling)\n\nExample:\n\n Data = [\n [2.0, 4.0],\n [9.0, 12.0]\n ]\n\n SCALE = [4, 9]\n\n OUTPUT = [\n [1.0, 2.0],\n [3.0, 4.0]\n ]\n\n", + "support_level": "default" + } + }, + { + "name": "SortedSegmentMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Sign", + "schema": { + "description": "\nComputes sign for each element of the input: -1, 0 or 1.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n\"Sign\",\n[\"X\"],\n[\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3, 3).astype(np.float32) - np.random.rand(3, 3).astype(np.float32)))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[ 0.02816287 0.22408086 -0.30342305]\n[-0.18481976 0.03948995 0.39698976]\n[-0.63304734 -0.6919183 -0.31524038]]\nY:\n[[ 1. 1. -1.]\n[-1. 1. 1.]\n[-1. -1. -1.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Xor", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise logical operation **xor** (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Xor\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", (np.random.rand(3, 3) > 0.5))\nworkspace.FeedBlob(\"B\", (np.random.rand(3, 3) > 0.5))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[ True True True]\n [False False True]\n [False True False]]\nB:\n[[False False False]\n [ True True True]\n [False False False]]\nC:\n[[ True True True]\n [ True True False]\n [False True False]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of booleans. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "SigmoidGradient", + "schema": { + "description": "\nSigmoidGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the sigmoid function.\n", + "support_level": "default" + } + }, + { + "name": "MergeMultiScalarFeatureTensorsGradient", + "schema": { + "description": "Explode given multi-feature tensors with scalar features into many.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".values_grad", + "name": "out_values_grad" + } + ], + "outputs": [ + { + "description": ".values_grad", + "name": "in1_values_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "Sin", + "schema": { + "description": "\nCalculates the sine of the given input tensor, element-wise.\n\nGithub Links:\n\n- 
https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sin_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sin\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.8466114 0.1803606 0.5601509 0.04959291 0.64770824]\nY: [0.74903965 0.17938434 0.5313141 0.04957259 0.60336035]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the sine of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Log", + "schema": { + "description": "\nCalculates the natural log of the given input tensor ($ln(x)$), element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/log_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Log\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[0.07341351 0.15404125 0.386613 ]\n [0.34090295 0.99727786 0.24141751]\n [0.32016268 0.8724168 0.93515724]]\nX after running op:\n[[-2.6116474 -1.8705349 -0.9503311 ]\n [-1.0761575 -0.00272586 -1.4212275 ]\n [-1.138926 -0.13648799 -0.06704059]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor computed as the natural log of the input tensor computed, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "DenseVectorToIdList", + "schema": { + "description": "\nDenseVectorToIdList: Convert a blob with dense feature into a ID_LIST.\n\nAn ID_LIST is a list of IDs (may be ints, often longs) that represents a single\nfeature. As described in https://caffe2.ai/docs/sparse-operations.html, a batch\nof ID_LIST examples is represented as a pair of lengths and values where the\n`lengths` (int32) segment the `values` or ids (int32/int64) into examples.\n\nInput is a single blob where the first dimension is the batch size and the\nsecond dimension is the length of dense vectors. This operator produces a\nID_LIST where out_values are the indices of non-zero entries\nand out_lengths are the number of non-zeros entries in each row.\n\n", + "inputs": [ + { + "description": "A data blob of dense vectors", + "name": "values" + } + ], + "outputs": [ + { + "description": "Lengths of the sparse feature", + "name": "out_lengths" + }, + { + "description": "Values of the sparse feature", + "name": "out_values" + } + ], + "support_level": "default" + } + }, + { + "name": "Reshape", + "schema": { + "attributes": [ + { + "description": "New shape. Do not set if using `new_shape` input.", + "name": "shape", + "option": "optional" + } + ], + "category": "Shape", + "description": "\nReshape the input tensor similar to numpy's\n[reshape](https://docs.scipy.org/doc/numpy/reference/generated/numpy.reshape.html).\n\nTakes a tensor as input and an optional tensor specifying the new shape. When\nthe second input is absent, an extra argument shape must be specified. Outputs\nthe reshaped tensor as well as the original shape.\n\nAt most one dimension of the new shape can be -1. 
In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is going to be copied\nfrom the input tensor.\n\nFor empty tensor, we will set the -1 dimension to be 0 (if one dimension is -1).\nWhen the tensor is empty, dimension of 0 will remain to be 0.\nE.g: data=np.empty(shape=[4, 0]), shape=[0, -1], the output tensor will be\nnp.emtpy(shape=[0, 0])\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reshape_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Reshape\",\n [\"data\"],\n [\"reshaped\", \"old_shape\"],\n shape=(3,2)\n)\n\nworkspace.FeedBlob(\"data\", (np.random.randint(100, size=(6))))\nprint(\"data:\", workspace.FetchBlob(\"data\"))\nworkspace.RunOperatorOnce(op)\nprint(\"reshaped:\", workspace.FetchBlob(\"reshaped\"))\nprint(\"old_shape:\", workspace.FetchBlob(\"old_shape\"))\n```\n\n**Result**\n\n```\ndata: [86 60 85 96 7 37]\nreshaped: [[86 60]\n [85 96]\n [ 7 37]]\nold_shape: [6]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "data" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Tensor containing new shape.", + "name": "new_shape" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Reshaped output tensor.", + "name": "reshaped" + }, + { + "description": "*(type: Tensor``)* Tensor containing old shape of `data`.", + "name": "old_shape" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragePoolGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "EnqueueBlobs", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "DepthConcat", + "schema": { + "description": "Backward compatible operator name for Concat.", + "support_level": "default" + } + }, + { + "name": "GatherPadding", + "schema": { + "attributes": [ + { + "description": "Outer-size of padding present around each range.", + "name": "padding_width", + "option": "optional" + }, + { + "description": "(Optional) Specifies a different end-padding width.", + "name": "end_padding_width", + "option": "optional" + } + ], + "description": "\nGather the sum of start and end paddings in a padded input sequence. Used in\norder to compute the gradients of AddPadding w.r.t the padding tensors.\n", + "inputs": [ + { + "description": "T Padded input data", + "name": "data_in" + }, + { + "description": "(i64) Num of elements in each range. sum(lengths) = N. 
If not provided, considers all data as a single segment.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Sum of all start paddings, or of all paddings if end_padding_sum is not provided.", + "name": "padding_sum" + }, + { + "description": "T Sum of all end paddings, if provided.", + "name": "end_padding_sum" + } + ], + "support_level": "default" + } + }, + { + "name": "DepthSplit", + "schema": { + "description": "Backward compatible operator name for Split.", + "support_level": "default" + } + }, + { + "name": "UnsortedSegmentMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Conv1DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LengthsSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceMin", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce.", + "name": "axes", + "option": "optional" + }, + { + "description": "Keep the reduced dimension(s) or not, default True keeps the reduced dimension(s).", + "name": "keepdims", + "option": "optional" + } + ], + "description": "\n Computes the min of the input tensor's element along the provided axes.\n The resulted tensor has the same rank as the input if keepdims equal True.\n If keepdims equal false, then the resulted tensor have the reduced dimension\n pruned.\n", + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced" + } + ], + "support_level": "default" + } + }, + { + "name": "StatRegistryExport", + "schema": { + "attributes": [ + { + "description": "(default true) Whether to atomically reset the counters afterwards.", + "name": "reset", + "option": "optional" + } + ], + "description": null, + "inputs": [ + { + "description": "If provided, export values from given StatRegistry.Otherwise, 
export values from the global singleton StatRegistry.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "1D string tensor with exported key names", + "name": "keys" + }, + { + "description": "1D int64 tensor with exported values", + "name": "values" + }, + { + "description": "The unix timestamp at counter retrieval.", + "name": "timestamps" + } + ], + "support_level": "default" + } + }, + { + "name": "Free", + "schema": { + "description": "\nFrees the content of the blobs. The input and output blobs should be\none-to-one inplace.", + "support_level": "default" + } + }, + { + "name": "FlexibleTopK", + "schema": { + "description": "\nGiven two tensors: X and K,\nretrieve the top K[..., 1] elements from X on the last dimension.\nX is an input tensor of shape [a_1, a_2, ..., a_n, r].\nK is an input tensor of shape [a_1, a_2, ..., a_n, 1],\nwhere for each element, r >= K[..., 1] > 0\nOutput two outputs:\n-Flatten values tensor of shape [ \\sum_i K[i, 1] ] which contains the values of\n the top K[..., 1] elements along the last dimension\n-Flatten indices tensor of shape [ \\sum_i K[i, 1] ] which contains the indices\n of the top K[..., 1] elements, flatten indices from the input tensor).\nThese two outputs should be used with the input K, so that we know which indices\nin X are picked.\n\nGiven two equivalent values, this operator uses the indices along the last dim-\nension as a tiebreaker. 
That is, the element with the lower index will appear\nfirst.\n ", + "inputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]", + "name": "X" + }, + { + "description": "Tensor of shape [a_1, a_2, ..., a_n, 1]", + "name": "K" + } + ], + "outputs": [ + { + "description": "Tensor of shape [ \\sum_i K[i, 1] ] containing top K[..., 1] values from the input tensor", + "name": "Flatten values" + }, + { + "description": "Tensor of shape [ \\sum_i K[i, 1] ] containing the indices into the flatten input", + "name": "Flatten indices" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceL1", + "schema": { + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "description": "\nComputes the **L1 norm** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceL1\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[ 2. 7. 6. 4. 5.]\n [ 2. 1. 9. 8. 7.]\n [ 4. 9. 1. 0. 0.]\n [ 6. 4. 0. 8. 1.]\n [ 1. 7. 1. 0. 2.]]\n\n [[ 5. 8. 1. 7. 7.]\n [ 4. 5. 6. 5. 4.]\n [ 1. 9. 6. 6. 3.]\n [ 6. 6. 8. 8. 4.]\n [ 2. 3. 5. 8. 1.]]]]\n\nY:\n[[ 7. 15. 7. 11. 12.]\n [ 6. 6. 15. 13. 11.]\n [ 5. 18. 7. 6. 3.]\n [ 12. 10. 8. 16. 5.]\n [ 3. 10. 6. 8. 3.]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "NumpyTile", + "schema": { + "description": null, + "inputs": [ + { + "description": "The input tensor.", + "name": "data" + }, + { + "description": "1-D Tensor specifying how many times to repeat each axis.", + "name": "repeats" + } + ], + "outputs": [ + { + "description": "Tensor that will contain input replicated along the given axis.", + "name": "tiled_data" + } + ], + "support_level": "default" + } + }, + { + "name": "EluGradient", + "schema": { + "description": "\nEluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the rectified linear function.\n", + "support_level": "default" + } + }, + { + "name": "ArgMax", + "schema": { + "attributes": [ + { + "default": -1, + "description": "The axis to get argmax.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": true, + "description": "If True (default), the output tensor shape will match the input tensor shape except the `axis` dimension equals 1. Else, the `axis` dimension of the output tensor is removed.", + "name": "keepdims", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nRetrieve the argmax of an axis dimension specified by the `axis`\nargument. Given an input tensor and two arguments (`axis` and\n`keepdims`), returns a tensor containing the indices of the largest\nelement along the given axis. If the `keepdims` arg is *True* (default),\nthe shape of the output tensor matches the input tensor except the\n`axis` dimension equals 1. Else, the `axis` dimension of the output\ntensor is removed.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/arg_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ArgMax\",\n [\"X\"],\n [\"Indices\"],\n axis=2,\n keepdims=False\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\n\n```\n\n**Result**\n\n```\nX: [[[4. 9. 6.]\n [6. 6. 1.]\n [9. 5. 4.]]\n\n [[6. 7. 4.]\n [7. 9. 1.]\n [3. 2. 8.]]\n\n [[3. 4. 6.]\n [5. 2. 7.]\n [1. 5. 7.]]]\nIndices: [[1 0 0]\n [1 1 2]\n [2 2 2]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Tensor of indices for the largest values.", + "name": "Indices" + } + ], + "support_level": "default" + } + }, + { + "name": "IndexGet", + "schema": { + "description": "\nGiven an index handle and a tensor of keys, return an Int tensor of same shape\ncontaining the indices for each of the keys. If the index is frozen, unknown\nentries are given index 0. Otherwise, new entries are added into the index.\nIf an insert is necessary but max_elements has been reached, fail.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + }, + { + "description": "Tensor of keys to be looked up.", + "name": "keys" + } + ], + "outputs": [ + { + "description": "Indices for each of the keys.", + "name": "indices" + } + ], + "support_level": "default" + } + }, + { + "name": "HeatmapMaxKeypoint", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MomentsGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "L1Distance", + "schema": { + "description": "\nComputes the row-wise L1 Distance between the two input tensors $X$ and $Y$, which is defined as\n\n$$L1Distance(\\mathbf{x},\\mathbf{y}) = \\sum_{i}\\mid x_i - y_i\\mid$$\n\nNote, both inputs must either be 1-dimensional or 2-dimensional and both must have the same shape. The output $Z$ will be 1-dimensional regardless and its length will equal the number of rows in the inputs.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"L1Distance\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\n// Create X\nX = 5*np.ones((1, 4))\nprint(\"X:\\n\",X)\n\n// Create Y\nY = np.ones((1, 4))\nprint(\"Y:\\n\",Y)\n\n// Feed X & Y into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"Y\", Y.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[5. 5. 5. 5.]]\nY:\n [[1. 1. 1. 1.]]\nZ:\n [16.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "First input tensor. (1D or 2D)", + "name": "X" + }, + { + "description": "Second input tensor. (must have the same shape as $X$)", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor. One value for each row of the inputs.", + "name": "Z" + } + ], + "support_level": "default" + } + }, + { + "name": "Allreduce", + "schema": { + "description": "\nDoes an allreduce operation among the nodes. Currently only Sum is supported.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be allreduced.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The allreduced tensor, same on all nodes.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Onnxifi", + "schema": { + "attributes": [ + { + "description": "(string default=\"\") Serialized ONNX model to be converted to backend representation", + "name": "onnx_model", + "option": "optional" + }, + { + "description": "Initialization pair indicating the mapping of the name between NetDef and ONNX model", + "name": "initializers", + "option": "optional" + }, + { + "description": "A list of key/value pairs indicating which input index to look up for real batch size for the given max output batch size", + "name": "output_resize_hints", + "option": "optional" + } + ], + "description": "\n The Onnxifi operator is a black-box operator to lower the computation to Onnxifi backend\n ", + "support_level": "default" + } + }, + { + "name": "Partition", + "schema": { + "attributes": [ + { + "description": "(int, default 0) If set, the operator transforms the first tensor values as floor(X_ij / num_partitions)", + "name": "pack_first_input", + "option": "optional" + } + ], + "description": "\nSplits the input int tensor into multiple ones according to the first tensor.\n\nTakes the first input and partitions it to shards according to the remainder of\nvalues modulo the number 
of partitions. It requires that the first tensor is of\nintegral type. The number of partitions is derived as (num_output / num_input).\n\nIf additional inputs are present they must have the same shape as the first\ninput, optionally with extra trailing dimensions. They will be partitioned\naccordingly to the first input.\n\nOptional arg 'pack_first_input' transforms the first tensor values as\nX_ij / num_partitions.\n\nOutputs are ordered as\nX_0_part_0, X_1_part_0, ..., X_N-1_part_0, X_0_part_1, ..., X_N-1_part_K-1\n", + "inputs": [ + { + "description": "Input tensor containing data to be partitioned. The number of input tensors might be greater than 1 but must have the same shape as the previous tensors.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output Partitions. The number of output tensors has to be a multiple of the number of input tensors.", + "name": "partitions" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsTopKGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "FlexibleTopKGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ThrowException", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LengthsMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Tile", + "schema": { + "attributes": [ + { + "description": "(*int*): number of replicas", + "name": "tiles", + "option": "optional" + }, + { + "description": "(*int*): axis to replicate along", + "name": "axis", + "option": "optional" + } + ], + "description": "\nConstructs a tensor by tiling a given tensor along a specified axis. This operation creates a new tensor by replicating the input tensor a number of times specified by the `tiles` argument along the `axis` dimension. 
The output tensor's `axis` dimension has $(X.dims(axis) * tiles)$ elements.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/tile_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Tile\",\n [\"X\", \"tiles\", \"axis\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(5,5)))\nworkspace.FeedBlob(\"tiles\", np.array([5]).astype(np.int32))\nworkspace.FeedBlob(\"axis\", np.array([1]).astype(np.int32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[9 1 7 1 3]\n [2 3 6 2 5]\n [0 9 2 6 4]\n [5 8 1 5 9]\n [2 0 1 3 7]]\nY:\n[[9 1 7 1 3 9 1 7 1 3 9 1 7 1 3 9 1 7 1 3 9 1 7 1 3]\n [2 3 6 2 5 2 3 6 2 5 2 3 6 2 5 2 3 6 2 5 2 3 6 2 5]\n [0 9 2 6 4 0 9 2 6 4 0 9 2 6 4 0 9 2 6 4 0 9 2 6 4]\n [5 8 1 5 9 5 8 1 5 9 5 8 1 5 9 5 8 1 5 9 5 8 1 5 9]\n [2 0 1 3 7 2 0 1 3 7 2 0 1 3 7 2 0 1 3 7 2 0 1 3 7]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): [OPTIONAL] number of replicas (overrides `tiles` argument)", + "name": "tiles" + }, + { + "description": "(*Tensor``*): [OPTIONAL] axis to replicate along (overrides `axis` argument)", + "name": "axis" + } + ], + "outputs": [ + { + "description": "(*Tensor*): output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Asin", + "schema": { + "description": "\nCalculates the arcsine of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arcsine of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SplitByLengths", + "schema": { + "attributes": [ + { + "description": "Which axis to split on", + "name": "axis", + "option": "optional" + }, + { + "description": "Either NHWC or NCWH, will split on C axis, defaults to NCHW", + "name": "order", + "option": "optional" + } + ], + "description": "\nSplit a tensor into a list of tensors, given a lengths input, along the specified\n'axis'. If `K` outputs are provided, the op assumes `len(lengths) % K == 0`.\nThe `input` will be split into `K` parts. 
Each part of length\n`sum(lengths[i*k:i*k+k))`", + "inputs": [ + { + "description": "The tensor to split", + "name": "input" + }, + { + "description": "The tensor `l_i` indicates the logic block of input.", + "name": "legnths" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateBlobsQueueDB", + "schema": { + "attributes": [ + { + "description": "(default: -1 (no key)) index of blob for DB key in the BlobsQueue.", + "name": "key_blob_index", + "option": "optional" + }, + { + "description": "(default: 0) index of blob for DB value in the BlobsQueue.", + "name": "value_blob_index", + "option": "optional" + }, + { + "description": "(default: 0.0 (no timeout)) Timeout in seconds for reading from the BlobsQueue.", + "name": "timeout_secs", + "option": "optional" + } + ], + "description": "Create a DBReader from a BlobsQueue", + "inputs": [ + { + "description": "The shared pointer to a queue containing Blobs.", + "name": "queue" + } + ], + "outputs": [ + { + "description": "The DBReader for the given BlobsQueue", + "name": "reader" + } + ], + "support_level": "default" + } + }, + { + "name": "GRUUnitGradient", + "schema": { + "attributes": [ + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "CloseBlobsQueue", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseLengthsSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Int8Sigmoid", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\nApply the Sigmoid function element-wise to the input 
tensor. This is often used\nas a non-linear activation function in a neural network. The sigmoid function is\ndefined as:\n\n$$Sigmoid(x) = \\frac{1}{1+\\exp(-x)}$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sigmoid_op.cc\n", + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input" + } + ], + "outputs": [ + { + "description": "The sigmoid normalized output values with the same shape as input tensor.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "GRUUnit", + "schema": { + "attributes": [ + { + "description": "Bool to determine if hidden state is zeroes or passed along for timesteps past the given sequence_length.", + "name": "drop_states", + "option": "optional" + }, + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "description": "\nGRUUnit computes the activations of a standard GRU,\nin a sequence-length aware fashion.\n\nConcretely, given the (fused) inputs X (TxNxD), the previous hidden\nstate (NxD), and the sequence lengths (N), computes the GRU\nactivations, avoiding computation if the input is invalid (as in, the\nvalue at X[t][n] >= seqLengths[n].\n\n", + "outputs": [ + { + "description": "The new GRU hidden state calculated by this op.", + "name": "hidden" + } + ], + "support_level": "default" + } + }, + { + "name": "SortedSegmentSum", + "schema": { + "description": "\nApplies 'Sum' to each segment of input tensor. Segments need to be sorted and\ncontiguous. See also UnsortedSegmentSum that doesn't have this requirement.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). 
Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Sqrt", + "schema": { + "description": "\nPerforms element-wise square-root ($\\sqrt{x}$) of input tensor $X$.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sqrt_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sqrt\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[8. 3. 3.]\n [4. 0. 0.]\n [1. 2. 5.]]\nY:\n[[2.8284268 1.7320508 1.7320508 ]\n [1.9999999 0. 0. ]\n [0.99999994 1.4142134 2.236068 ]]\n\n```\n\n
\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "TTLinearGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseFtrl", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LambdaRankNdcgGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseNormalize", + "schema": { + "attributes": [ + { + "description": "A bool variable to control whether to use max norm or constant norm. When use_max_norm = false, constant norm is used so that all the embedding vectors are scaled to have a L2 norm equals to A (see blow argument norm=A). If use_max_norm = true, max norm is used so that embedding is scaled so that its l2 norm is no larger than A. If an embedding's norm is less than A originally, the embedding is left unchanged. The default is True.", + "name": "use_max_norm", + "option": "optional" + }, + { + "description": "L2 norm of the embedding. 
The default is 1.0.", + "name": "norm", + "option": "optional" + } + ], + "description": "\nGiven a sparse matrix, apply max_norm or constant_norm sparse regularization.\n", + "inputs": [ + { + "description": "Parameters to be normalized", + "name": "param" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed (optional - not used, this argument is for backwards compatibility)", + "name": "grad" + } + ], + "outputs": [ + { + "description": "Normalized parameters", + "name": "output_param" + } + ], + "support_level": "default" + } + }, + { + "name": "LeakyRelu", + "schema": { + "attributes": [ + { + "default": 0.01, + "description": "Coefficient of leakage.", + "name": "alpha", + "option": "optional", + "type": "float32" + } + ], + "description": "\nThe *LeakyRelu* op takes one input tensor $X$ and an argument $alpha$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element wise leaky relu operation, defined as\n\n$$y=LeakyRelu(x) =\\begin{cases}\\alpha x & x < 0\\\\x & otherwise\\end{cases}$$\n\nThe default value of *alpha* is 0.01.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LeakyRelu\",\n [\"X\"],\n [\"Y\"],\n alpha=0.01\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.91060215 0.09374836 2.1429708 ]\n [-0.748983 0.19164062 -1.5130422 ]\n [-0.29539835 -0.8530696 0.7673204 ]]\n\nY:\n [[-0.00910602 0.09374836 2.1429708 ]\n [-0.00748983 0.19164062 -0.01513042]\n [-0.00295398 -0.0085307 0.7673204 ]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor, calculated as described above.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "AddGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LeakyReluGradient", + "schema": { + "attributes": [ + { + "description": "Coefficient of leakage", + "name": "alpha", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "ReadRandomBatch", + "schema": { + "attributes": [ + { + "description": "Number of top-level entries to read.", + "name": "batch_size", + "option": "optional" + }, + { + "description": "(bool) Repeat the dataset indefinitely", + "name": "loop_over", + "option": "optional" + } + ], + "description": "\nRead the next batch of examples out of the given cursor,\nidx blob, offset matrix and data blobs.\n\nInput(0) is a blob pointing to a TreeCursor,\nInput(1) is a blob pointing to the shuffled idx\nInput(2) is a blob pointing to the offset matrix and\n[Input(3),... 
Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nReadRandomBatch is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "idx with a shuffled order.", + "name": "idx" + }, + { + "description": "offset matrix containing length offset info.", + "name": "offsetsmat" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing the next batch for field 0.", + "name": "field_0" + } + ], + "support_level": "default" + } + }, + { + "name": "HalfToFloat", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceScatter", + "schema": { + "description": "\nDoes reduce-scatter operation among the nodes. Currently only Sum is supported.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be reduce-scattered.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The reduced tensor, scattered on all nodes.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SeluGradient", + "schema": { + "attributes": [ + { + "description": "(float) default to 1.6732~; affects the activation function itself.This should go with the weight initialization in the paper. 
See https://arxiv.org/abs/1706.02515", + "name": "alpha", + "option": "optional" + }, + { + "description": "(float) default to 1.0507~; affects the activation function itself.", + "name": "scale", + "option": "optional" + } + ], + "description": "\nSeluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the selu function.\n", + "inputs": [ + { + "description": "input tensor", + "name": "Y" + }, + { + "description": "input tensor", + "name": "dY" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsToShape", + "schema": { + "description": "\nThis operator takes a list of $N$ equal integers as input which represent the lengths of $N$ vectors. The output is the calculated shape of the matrix if the $N$ integers were combined into a single matrix.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsToShape\",\n [\"X\"],\n [\"Y\"]\n)\n\n// Create X: Sample softmax output for 5-class model\nX = np.array([2,2,2,2,2,2,2,2,2,2])\nprint(\"X:\\n\",X)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.int32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [2 2 2 2 2 2 2 2 2 2]\nY:\n [10 2]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "List, of length $N$, of equal integers representing the lengths of several vectors.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Vector of length 2 describing the dimensions of the data if the $N$ vectors from the input were combined to a single matrix.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragePool1DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ResetCounter", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Resets counter to this value, must be >= 0.", + "name": "init_count", + "option": "optional", + "type": "int64" + } + ], + "description": "\nResets a count-down counter with initial value specified by the `init_count`\nargument.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n// Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n// Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n// Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n// Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n// Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' 
after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: int)* [OPTIONAL] count value BEFORE this operation.", + "name": "previous_value" + } + ], + "support_level": "default" + } + }, + { + "name": "NormalizeGradient", + "schema": { + "attributes": [ + { + "description": "axis to normalize", + "name": "axis", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseMomentumSGDUpdate", + "schema": { + "attributes": [ + { + "description": "Momentum hyperparameter.", + "name": "momentum", + "option": "optional" + }, + { + "description": "(boolean) Whether to use Nesterov Accelerated Gradient.", + "name": "nesterov", + "option": "optional" + } + ], + "description": "\n\nPerforms a momentum SGD update analogous to MomentumSGDUpdate, but using a\nGradientSlice and indices into the full param and momentum tables. 
Both param\nand momentum should be in-place (corresponding inputs and outputs should be the\nsame blobs).\n\n\n\n", + "inputs": [ + { + "description": "GradientSlice with gradients for updated indices.", + "name": "grad" + }, + { + "description": "Momentum blob, same shape as param.", + "name": "moment" + }, + { + "description": "Learning rate.", + "name": "lr" + }, + { + "description": "Full parameter blob.", + "name": "param" + }, + { + "description": "Indices (in first dimension of param) where updates are performed.", + "name": "indices" + } + ], + "outputs": [ + { + "description": "Adjusted gradient.", + "name": "output_grad" + }, + { + "description": "Updated momentum.", + "name": "output_moment" + }, + { + "description": "Updated parameter", + "name": "output_param" + } + ], + "support_level": "default" + } + }, + { + "name": "AffineChannelGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ColwiseMaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Conv2DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CosGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Scatter", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Which dimension to scatter on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nUpdate values of the tensor by overriding current value specified by indices.\n\nWrites all values from the tensor UPDATES into DATA at the indices specified in the INDICES tensor.\nFor each value in DATA, its output index is specified by its index in UPDATES and by the corresponding value in INDICES for the specified axis.\n\nFor a 3-D tensor, DATA is updated as:\n\nDATA[INDICES[i][j][k]][j][k] = UPDATES[i][j][k] # if axis == 0\nDATA[i][INDICES[i][j][k]][k] = UPDATES[i][j][k] # if axis == 
1\nDATA[i][j][INDICES[i][j][k]] = UPDATES[i][j][k] # if axis == 2\n\nCurrently only works on CPU because of access to INDICES.\n", + "inputs": [ + { + "description": "Tensor to be updated.", + "name": "DATA" + }, + { + "description": "1-D list of indices on the first dimensionof X_0 that need to be updated", + "name": "INDICES" + }, + { + "description": "Update slices, with shape len(INDICES) + shape(X_0)[1:]", + "name": "UPDATES" + } + ], + "outputs": [ + { + "description": "The updated output.", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "MapToKeyValue", + "schema": { + "description": "Convert a map blob into key and value blob pairs", + "inputs": [ + { + "description": "Blob reference to the map", + "name": "map blob" + } + ], + "outputs": [ + { + "description": "Blob reference to the key", + "name": "key blob" + }, + { + "description": "Blob reference to the value", + "name": "value blob" + } + ], + "support_level": "default" + } + }, + { + "name": "StringStartsWith", + "schema": { + "attributes": [ + { + "description": "The prefix to check input strings against.", + "name": "prefix", + "option": "optional" + } + ], + "description": "\nPerforms the starts-with check on each string in the input tensor.\nReturns tensor of boolean of the same dimension of input.\n", + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of bools of same shape as input.", + "name": "bools" + } + ], + "support_level": "default" + } + }, + { + "name": "IndexStore", + "schema": { + "description": "\nStores the keys of this index in a 1-D tensor. 
Since element 0 is reserved\nfor unknowns, the first element of the output tensor will be element of index 1.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "1-D tensor with elements starting with index 1.", + "name": "items" + } + ], + "support_level": "default" + } + }, + { + "name": "Im2Col", + "schema": { + "description": "The Im2Col operator from Matlab.", + "inputs": [ + { + "description": "4-tensor in NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "4-tensor. For NCHW: N x (C x kH x kW) x outH x outW.For NHWC: N x outH x outW x (kH x kW x C", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "FCTransposedGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "AddPadding", + "schema": { + "attributes": [ + { + "description": "Number of copies of padding to add around each range.", + "name": "padding_width", + "option": "optional", + "type": "int64" + }, + { + "description": "[OPTIONAL] Specifies a different end-padding width. If this is not set, will use same as `padding_width`.", + "name": "end_padding_width", + "option": "optional", + "type": "int64" + } + ], + "description": "\nGiven a partitioned tensor $T$, where the partitions are\ndefined as ranges on its outer-most (slowest varying) dimension $N$,\nreturn a tensor $T<(N + 2 * padding\\_width), D_1, ..., D_n>$ with paddings\nadded to the start and end of each range.\n\nOptionally, different paddings can be provided for beginning and end.\nPaddings provided must be a tensor $T$. If no padding is\nprovided, add zero padding. If no lengths vector is provided, add padding\nonly once, at the start and end of data.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sequence_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AddPadding\",\n [\"X\", \"lengths\"],\n [\"Y\", \"lengths_out\"],\n padding_width=1\n\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,2,2).astype(np.float32)))\nworkspace.FeedBlob(\"lengths\", np.array([3]).astype(np.int32))\n\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"lengths_out:\", workspace.FetchBlob(\"lengths_out\"))\n```\n\n**Result**\n\n```\nX: [[[0.2531572 0.4588472 ]\n [0.45140603 0.61161053]]\n\n [[0.92500854 0.8045306 ]\n [0.03356671 0.30233648]]\n\n [[0.4660227 0.6287745 ]\n [0.79372746 0.08609265]]]\nY: [[[0. 0. ]\n [0. 0. ]]\n\n [[0.2531572 0.4588472 ]\n [0.45140603 0.61161053]]\n\n [[0.92500854 0.8045306 ]\n [0.03356671 0.30233648]]\n\n [[0.4660227 0.6287745 ]\n [0.79372746 0.08609265]]\n\n [[0. 0. ]\n [0. 0. ]]]\nlengths_out: [5]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input data ($T$).", + "name": "data_in" + }, + { + "description": "*(type: Tensor``)* Number of elements in each range. sum(lengths) = N.", + "name": "lengths" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Padding data for range start ($T$).", + "name": "start_padding" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Padding for range end. If not provided, `start_padding` is used ($T$).", + "name": "end_padding" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Padded data tensor ($T$).", + "name": "data_out" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Lengths for each padded range.", + "name": "lengths_out" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Reshape", + "schema": { + "attributes": [ + { + "description": "New shape", + "name": "shape", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\nReshape the input tensor similar to numpy.reshape.\n\nIt takes a tensor as input and an optional tensor specifying the new shape.\nWhen the second input is absent, an extra argument `shape` must be specified.\nIt outputs the reshaped tensor as well as the original shape.\n\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. 
A dimension\ncould also be 0, in which case the actual dimension value is going to be copied\nfrom the input tensor.\n", + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + }, + { + "description": "New shape.", + "name": "new_shape" + } + ], + "outputs": [ + { + "description": "Reshaped data.", + "name": "reshaped" + }, + { + "description": "Original shape.", + "name": "old_shape" + } + ], + "support_level": "default" + } + }, + { + "name": "Where", + "schema": { + "description": "\nOperator Where takes three input data (Tensor, Tensor, Tensor) and\nproduces one output data (Tensor) where z = c ? x : y is applied elementwise.\n", + "inputs": [ + { + "description": "input tensor containing booleans", + "name": "C" + }, + { + "description": "input tensor", + "name": "X" + }, + { + "description": "input tensor", + "name": "Y" + } + ], + "outputs": [ + { + "description": "output tensor", + "name": "Z" + } + ], + "support_level": "default" + } + }, + { + "name": "GetAllBlobNames", + "schema": { + "attributes": [ + { + "description": "(bool, default true) Whether to include blobs inherited from parent workspaces.", + "name": "include_shared", + "option": "optional" + } + ], + "description": "\nReturn a 1D tensor of strings containing the names\nof each blob in the active workspace.\n", + "outputs": [ + { + "description": "1D tensor of strings containing blob names.", + "name": "blob_names" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToFusedRandRowwiseQuantized", + "schema": { + "attributes": [ + { + "description": "How many bits to quantize per data (defaults to 8).", + "name": "bitwidth", + "option": "optional" + }, + { + "description": "random or not (True). 
False is set up for unittest.", + "name": "random", + "option": "optional" + } + ], + "description": "\nApplies row-wise stochastic/random quantization by determining the range of\neach row in the input matrix, and then quantize each element to one of two\nclosest discrete levels by randomly drawing Bernoulli distribution.\nThe method is extended from TernGrad [1],\nwhich randomly quantizes gradients to three levels to reduce communication in distributed training.\nThe format of each row (x) in the output matrix is [bitwidth][tail][min][max][data]:\nbitwidth[1 Byte]: bitwidth per data [1, 2, 4 or 8];\ntail[1 Byte]: the number of unused buckets [1-8] (One byte is split to 8/bitwidth buckets and each bucket stores one low-precision data in bitwidth bits);\nmin[4 Bytes]: the minimum floating value min(x);\nmax[4 Bytes]: the maximum floating value max(x);\ndata: quantized data.\nThe quantization is uniform with levels q = min + (max-min)/(2^bitwidth - 1)*[0:1:2^bitwidth].\nDuring stochastic/random quantization x'=Quantize(x), for q_j < x_i <= q_{j+1}, we draw quantization x'_i from Bernoulli distributions with\nP(x'_i = q_{j+1}) = (x_i - q_j)/(q_{j+1} - q_j), and\nP(x'_i = q_j) = (q_{j+1} - x_i)/(q_{j+1} - q_j) where x'_i is the quantized value of x_i.\n[1] proved E{x'_i}=x_i, which is an unbiased approximation. More details are in the paper.\nFor example, suppose targeted bitwidth = 2 and x = [0.3, -1.4, -0.6, 0.9, 1.0],\nthen tail = 3, min = -1.4, max = 1.0 and q = [-1.4, -0.6, 0.2, 1.0].\nx_1 = 0.3 will be quantized to x'_1 = 0.2 with probability 7/8 and to x'_1 = 1.0 with probability 1/8.\nThe storage format of quantized data is: [x'_1|x'_3|x'_5|xxx]-[x'_2|x'_4|xxx|xxx].\nIn general, a input row is split to multiple segments. 
One segment is a continuous subarray of the row,\nand its length is the number of bytes storing quantized data in the output matrix.\nThe b-th bucket of the i-th byte stores the i-th data of the b-th segment of input row.\n\n[1] Wen, Wei, Cong Xu, Feng Yan, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li.\n\"Terngrad: Ternary gradients to reduce communication in distributed deep learning.\"\nIn Advances in Neural Information Processing Systems, pp. 1508-1518. 2017.\n\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused bitwidth, tail, min, max and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "PackRNNSequence", + "schema": { + "description": "\nPack values based on the length blob. Each number from length blob represents\nthe corresponding values that need to be packed. The dimension for each pack\nis the same as the maximum number from the length blob (padding with zero is\nimplemented for smaller length value). The overall output dimension is:\nT * N * D, where T is the max number of lengths, N is the size of lengths,\nand D is the dimension of each feature value. The following example shows\nthe input and output of this operator:\n\n\nGiven:\n values = [v1, v2, v3, v4, v5, v6, v7, v8]\n lengths = [2, 3, 1, 2];\n\n\nOutput:\n output = [\n [v1, v3, v6, v7],\n [v2, v4, 0, v8],\n [0, v5, 0, 0 ],\n ]\n\n\nOne application for this operator is the transfer data into the format that is\nused for RNN models. 
Note that the gradient operator of PackRNNSequence is\nUnpackRNNSequence.\n", + "inputs": [ + { + "description": "Data tensor, contains a sequence of features", + "name": "values" + }, + { + "description": "lengths with each number representing the pack size.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output tensor after packing", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "PadEmptySamples", + "schema": { + "description": "\nPad empty field given lengths and index features,\n\nInput(0) is a blob pointing to the lengths of samples in one batch,\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the features.\n\nPadEmptySamples is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Tensor containing lengths with empty sample padded.", + "name": "out_lengths" + } + ], + "support_level": "default" + } + }, + { + "name": "PadImage", + "schema": { + "description": "\nPadImage pads values around the boundary of an image according to the pad\nvalues and stride sizes defined by the ConvPoolOpBase operator.\n ", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case. ", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from padding the H and W dimensions on the tensor. Dimensions will vary based on various pad and stride sizes.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Glu", + "schema": { + "description": "\nApplies gated linear unit to the input Tensor X. 
The output Y is half the size\nof the input X, so if the shape of X is [d1, d2, ..., N] shape of Y will be\n[d1, d2, ..., dn/2] and Y(:dn-1, i) = GLU(X(:dn-1, i), X(:dn-1, i+N/2)) =\nX(dn-1, i) * sigmoid(X(dn-1, i+N/2))\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Shape", + "schema": { + "attributes": [ + { + "description": "Array of interested axes.If given, this operator only returns the dimensions of the given axes.Otherwise, the operator returns the dimensions of all axes.", + "name": "axes", + "option": "optional", + "type": "int64[]" + } + ], + "description": "\nProduce a 1D int64 tensor with the shape of the input tensor.\nIf called with an optional argument `axes`, the result will only\ncontain the dimensions of specified axes.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/shape_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Shape\",\n [\"X\"],\n [\"shape\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(2,3))))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"shape:\", workspace.FetchBlob(\"shape\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[3 2 5]\n [5 7 3]]\nshape: [2 3]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor containing shape of input tensor.", + "name": "shape" + } + ], + "support_level": "default" + } + }, + { + "name": "ReservoirSampling", + "schema": { + "attributes": [ + { + "description": "The number of random samples to append for each positive samples", + "name": "num_to_collect", + "option": "optional" + } + ], + "description": "\nCollect `DATA` tensor into `RESERVOIR` of size `num_to_collect`. `DATA` is\nassumed to be a batch.\n\nIn case where 'objects' may be repeated in data and you only want at most one\ninstance of each 'object' in the reservoir, `OBJECT_ID` can be given for\ndeduplication. If `OBJECT_ID` is given, then you also need to supply additional\nbook-keeping tensors. See input blob documentation for details.\n\nThis operator is thread-safe.\n", + "inputs": [ + { + "description": "The reservoir; should be initialized to empty tensor", + "name": "RESERVOIR" + }, + { + "description": "Number of examples seen so far; should be initialized to 0", + "name": "NUM_VISITED" + }, + { + "description": "Tensor to collect from. The first dimension is assumed to be batch size. If the object to be collected is represented by multiple tensors, use `PackRecords` to pack them into single tensor.", + "name": "DATA" + }, + { + "description": "Mutex to prevent data race", + "name": "MUTEX" + }, + { + "description": "(Optional, int64) If provided, used for deduplicating object in the reservoir", + "name": "OBJECT_ID" + }, + { + "description": "(Optional) Auxiliary bookkeeping map. 
This should be created from `CreateMap` with keys of type int64 and values of type int32", + "name": "OBJECT_TO_POS_MAP_IN" + }, + { + "description": "(Optional) Tensor of type int64 used for bookkeeping in deduplication", + "name": "POS_TO_OBJECT_IN" + } + ], + "outputs": [ + { + "description": "Same as the input", + "name": "RESERVOIR" + }, + { + "description": "Same as the input", + "name": "NUM_VISITED" + }, + { + "description": "(Optional) Same as the input", + "name": "OBJECT_TO_POS_MAP" + }, + { + "description": "(Optional) Same as the input", + "name": "POS_TO_OBJECT" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchBoxCox", + "schema": { + "description": "\nInput `data` is a N * D matrix. Apply box-cox transform for each column.\n`lambda1` and `lambda2` is of size D that defines the hyper-parameters for\nthe transform of each column `x` of the input `data`:\n\n ln(x + lambda2), if lambda1 == 0\n ((x + lambda2)^lambda1 - 1)/lambda1, if lambda1 != 0\n\n", + "inputs": [ + { + "description": "input float or double N * D matrix", + "name": "data" + }, + { + "description": "tensor of size D with the same type as data", + "name": "lambda1" + }, + { + "description": "tensor of size D with the same type as data", + "name": "lambda2" + } + ], + "outputs": [ + { + "description": "output matrix that applied box-cox transform", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Clip", + "schema": { + "attributes": [ + { + "description": "Minimum value, under which element is replaced by min (default=*numeric_limits::lowest()*).", + "name": "min", + "option": "optional", + "type": "float32" + }, + { + "description": "Maximum value, under which element is replaced by max (default=*numeric_limits::max()*).", + "name": "max", + "option": "optional", + "type": "float32" + } + ], + "description": "\nThis operator limits the given input within an interval. The interval is\nspecified by the `min` and `max` arguments. 
They default to\n*numeric_limits::lowest()* and *numeric_limits::max()* respectively. The\nclipping operation can be done in an in-place fashion by using the same output\nblob as the input blob.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/clip_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Clip\",\n [\"X\"],\n [\"Y\"],\n min=20.0,\n max=60.0\n\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(5,5))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\nX: [[45. 16. 59. 99. 48.]\n [12. 44. 46. 82. 28.]\n [ 1. 91. 18. 9. 71.]\n [24. 37. 61. 12. 81.]\n [36. 38. 30. 84. 40.]]\nY: [[45. 20. 59. 60. 48.]\n [20. 44. 46. 60. 28.]\n [20. 60. 20. 20. 60.]\n [24. 37. 60. 20. 60.]\n [36. 38. 30. 60. 40.]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(Tensor``)* Input tensor within range [*numeric_limits::lowest()*, *numeric_limits::max()*].", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(Tensor``)* Output tensor clipped within range [`min`, `max`].", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "FeedBlob", + "schema": { + "attributes": [ + { + "description": "(string) if provided then we will use this string as the value for theprovided output tensor", + "name": "value", + "option": "optional" + } + ], + "description": "\nFeedBlobs the content of the blobs. The input and output blobs should be\none-to-one inplace.", + "support_level": "default" + } + }, + { + "name": "CreateBlobsQueue", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "BisectPercentile", + "schema": { + "attributes": [ + { + "description": "1D tensor, which is the concatenation of all sorted raw feature values for all features.", + "name": "percentile_raw", + "option": "optional" + }, + { + "description": "1D tensor. There is one-one mapping between percentile_mapping and percentile_raw such that each element in percentile_mapping corresponds to the percentile value of the corresponding raw feature value.", + "name": "percentile_mapping", + "option": "optional" + }, + { + "description": "1D tensor. There is one-one mapping between percentile_upper and percentile_raw such that each element in percentile_mapping corresponds to the percentile lower bound of the corresponding raw feature value.", + "name": "percentile_lower", + "option": "optional" + }, + { + "description": "1D tensor. There is one-one mapping between percentile_upper and percentile_raw such that each element in percentile_mapping corresponds to the percentile upper bound of the corresponding raw feature value.", + "name": "percentile_upper", + "option": "optional" + }, + { + "description": "1D tensor. 
There is one-one mapping between percentile_upper and percentile_raw such that each element in percentile_mapping corresponds to the percentile upper bound of the corresponding raw feature value.", + "name": "lengths", + "option": "optional" + } + ], + "description": "\n This operator is to map raw feature values into the percentile\n representations based on Bisection for more than one feature.\n\n The input is the bath of input feature values, with the size of (batch_size,\n num_feature), where num_feature = F (F >= 1).\n\n For each feature, we also need additional information regarding the feature\n value distribution.\n There are several vectors to keep data to percentile mappping information\n as arguments (context):\n 1. feature raw values (R)\n 2. feature percentile mapping (P)\n 3. feature percentile lower bound (L)\n 4. feature percentile upper bound (U)\n\n A toy example:\n Suppose the sampled data distribution is as follows:\n 1, 1, 2, 2, 2, 2, 2, 2, 3, 4\n We have the mapping vectors as follows:\n R = [1, 2, 3, 4]\n P = [0.15, 0.55, 0.9, 1.0]\n L = [0.1, 0.3, 0.9, 1.0]\n U = [0.2, 0.8, 0.9, 1.0]\n Where P is computed as (L + U) / 2.\n\n For a given list of feature values, X = [x_0, x_1, ..., x_i, ...], for each\n feature value (x_i) we first apply bisection to find the right index (t),\n such that R[t] <= x_i < R[t+1].\n If x_i = R[t], P[t] is returned;\n otherwise, the interpolation is apply by (R[t], R[t+1]) and (U[t] and L[t]).\n\n As there are F features (F >= 1), we concate all the R_f, P_f, L_f, and\n U_f for each feature f and use an additional input length to keep track of\n the number of points for each set of raw feature value to percentile mapping.\n For example, there are two features:\n R_1 =[0.1, 0.4, 0.5];\n R_2 = [0.3, 1.2];\n We will build R = [0.1, 0.4, 0.5, 0.3, 1.2]; besides, we have\n lengths = [3, 2]\n to indicate the boundaries of the percentile information.\n\n", + "inputs": [ + { + "description": "Input 2D tensor of floats of 
size (N, D), where N is the batch size and D is the feature dimension.", + "name": "raw_values" + } + ], + "outputs": [ + { + "description": "2D tensor of output with the same dimensions as the input raw_values.", + "name": "percentile" + } + ], + "support_level": "default" + } + }, + { + "name": "ReversePackedSegs", + "schema": { + "description": "\nReverse segments in a 3-D tensor (lengths, segments, embeddings,), leaving\npaddings unchanged. This operator is used to reverse input of a recurrent neural\nnetwork to make it a BRNN.\n ", + "inputs": [ + { + "description": "a 3-D (lengths, segments, embeddings,) tensor.", + "name": "data" + }, + { + "description": "length of each segment.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "a (lengths, segments, embeddings,) tensor with each segment reversedand paddings unchanged.", + "name": "reversed data" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateScope", + "schema": { + "description": "\n'CreateScope' operator initializes and outputs empty scope that is used\nby Do operator to store local blobs\n ", + "support_level": "default" + } + }, + { + "name": "SpatialSoftmaxWithLossGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "StoreAdd", + "schema": { + "attributes": [ + { + "description": "key of the counter (required)", + "name": "blob_name", + "option": "optional" + }, + { + "description": "value that is added (optional, default: 1)", + "name": "add_value", + "option": "optional" + } + ], + "description": "\nAdd a value to a remote counter. If the key is not set, the store\ninitializes it to 0 and then performs the add operation. 
The operation\nreturns the resulting counter value.\n", + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + } + ], + "outputs": [ + { + "description": "the current value of the counter", + "name": "value" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeSingleListFeatureTensorsGradient", + "schema": { + "description": "Explode multi-feature tensors with list features into single-feature tensors.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".presence", + "name": "in1_presence" + }, + { + "description": ".values.values_grad", + "name": "out_values_values" + } + ], + "outputs": [ + { + "description": ".values_grad", + "name": "out1_values" + } + ], + "support_level": "default" + } + }, + { + "name": "DistributeFpnProposals", + "schema": { + "attributes": [ + { + "description": "(int) ROI_CANONICAL_SCALE", + "name": "roi_canonical_scale", + "option": "optional" + }, + { + "description": "(int) ROI_CANONICAL_LEVEL", + "name": "roi_canonical_level", + "option": "optional" + }, + { + "description": "(int) ROI_MAX_LEVEL", + "name": "roi_max_level", + "option": "optional" + }, + { + "description": "(int) ROI_MIN_LEVEL", + "name": "roi_min_level", + "option": "optional" + } + ], + 
"description": "\n...\n", + "inputs": [ + { + "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)", + "name": "rois" + } + ], + "outputs": [ + { + "description": "RPN proposals for ROI level 2, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn2" + }, + { + "description": "RPN proposals for ROI level 3, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn3" + }, + { + "description": "RPN proposals for ROI level 4, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn4" + }, + { + "description": "RPN proposals for ROI level 5, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn5" + }, + { + "description": "Permutation on the concatenation of all rois_fpni, i=min...max, such that when applied the RPN RoIs are restored to their original order in the input blobs.", + "name": "rois_idx_restore" + } + ], + "support_level": "default" + } + }, + { + "name": "CollectRpnProposals", + "schema": { + "attributes": [ + { + "description": "(int) RPN_MAX_LEVEL", + "name": "rpn_max_level", + "option": "optional" + }, + { + "description": "(int) RPN_MIN_LEVEL", + "name": "rpn_min_level", + "option": "optional" + }, + { + "description": "(int) RPN_POST_NMS_TOP_N", + "name": "rpn_post_nms_topN", + "option": "optional" + } + ], + "description": "\n...\n", + "inputs": [ + { + "description": "RPN proposals for FPN level 2, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn2" + }, + { + "description": "RPN proposals for FPN level 3, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn3" + }, + { + "description": "RPN proposals for FPN level 4, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn4" + }, + { + "description": "RPN proposals for FPN level 5, format (image_index, x1, y1, x2, y2). 
See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn5" + }, + { + "description": "RPN proposals for FPN level 6, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn6" + }, + { + "description": "RPN objectness probabilities for FPN level 2. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn2" + }, + { + "description": "RPN objectness probabilities for FPN level 3. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn3" + }, + { + "description": "RPN objectness probabilities for FPN level 4. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn4" + }, + { + "description": "RPN objectness probabilities for FPN level 5. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn5" + }, + { + "description": "RPN objectness probabilities for FPN level 6. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn6" + } + ], + "outputs": [ + { + "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)", + "name": "rois" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsMaxWithMainInputAndForwardOutputGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MomentumSGDUpdate", + "schema": { + "description": "\n\nPerforms a momentum SGD update for an input gradient and momentum\nparameters. 
Concretely, given inputs (grad, m, lr, param) and arguments\n(momentum, nesterov), computes:\n\n if not nesterov:\n adjusted_gradient = lr * grad + momentum * m\n param = param - adjusted_gradient\n return (adjusted_gradient, adjusted_gradient, param)\n else:\n m_new = momentum * m + lr * grad\n param = param - ((1 + momentum) * m_new - momentum * m),\n return ((1 + momentum) * m_new - momentum * m, m_new, param)\n\nOutput is (grad, momentum, parameter).\n\nNote the difference to MomentumSGD, which returns a new gradient\nbut does not perform the parameter update.\n\n", + "support_level": "default" + } + }, + { + "name": "SparseLengthsIndicesInGradientWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Pow", + "schema": { + "attributes": [ + { + "description": "The exponent of the power function. Do not use if setting exponent via input.", + "name": "exponent", + "option": "optional" + }, + { + "default": -1, + "description": "", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "", + "name": "broadcast", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nThe *Pow* op takes an input data tensor $X$ and an exponent parameter *exponent*, which can be a scalar or another tensor. As output, it produces a single output data tensor $Y$, where the function $f(x) = x^{exponent}$ has been applied to $X$ elementwise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pow_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pow_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Pow\",\n [\"X\", \"exponent\"],\n [\"Y\"],\n broadcast=1\n)\n\nworkspace.FeedBlob(\"X\", np.array([1,2,3,4,5,6]).astype(np.float32))\nprint(\"X: \", workspace.FetchBlob(\"X\"))\n\nworkspace.FeedBlob(\"exponent\", np.array([2]).astype(np.float32))\nprint(\"exponent: \", workspace.FetchBlob(\"exponent\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y: \", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [1. 2. 3. 4. 5. 6.]\nexponent: [2.]\nY: [ 1. 4. 9. 16. 25. 36.]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob to be operated on.", + "name": "X" + }, + { + "description": "Exponent blob containing the exponent(s) for calculation. Do not use if setting exponent via argument.", + "name": "exponent" + } + ], + "outputs": [ + { + "description": "Output data blob with the same shape as the input.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Cube", + "schema": { + "description": null, + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the cube of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragePool2D", + "schema": { + "description": "AveragePool2D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BitwiseAnd", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise bitwise operation `bitwise_and` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. 
If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "Mean", + "schema": { + "description": "\nElement-wise mean of an arbitrary number of input tensors. This operation can be\nperformed in-place, by using the first input blob as the output blob. All inputs\nmust have the same shape and data type, and the output will have the same shape\nas the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/mean_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Mean\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Mean:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.6035237 0.5305746 0.6298913 ]\n [0.9169737 0.01280353 0.16286302]\n [0.6017664 0.9946255 0.05128575]]\nY:\n[[0.07544111 0.45371833 0.08460239]\n [0.9708728 0.7422064 0.7933344 ]\n [0.97671497 0.3411384 0.73818344]]\nZ:\n[[0.08837954 0.90187573 0.46734726]\n [0.6308827 0.8719029 0.39888734]\n [0.90059936 0.92883426 0.5695987 ]]\nMean:\n[[0.25578147 0.6287229 0.39394698]\n [0.8395764 0.5423043 0.45169494]\n [0.8263602 0.75486606 0.45302266]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors with the same shape.", + "name": "X, Y, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with the same dimensions as inputs. Contains the mean values of the input tensors calculated element-wise.", + "name": "M" + } + ], + "support_level": "default" + } + }, + { + "name": "And", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise logical operation **and** (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"And\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", (np.random.rand(3, 3) > 0.5))\nworkspace.FeedBlob(\"B\", (np.random.rand(3, 3) > 0.5))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n [[ True False False]\n [False True False]\n [False False True]]\nB:\n [[ True False True]\n [False False False]\n [False False False]]\nC:\n [[ True False False]\n [False False False]\n [False False False]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of booleans. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "GivenTensorFill", + "schema": { + "attributes": [ + { + "description": "The value of the elements to go in the *output* tensor.", + "name": "values" + }, + { + "description": "The data type for the elements of the output tensor. Strictly must be one of the types from DataType enum in TensorProto.", + "name": "dtype", + "option": "optional" + }, + { + "description": "Desired shape of the *output* tensor.", + "name": "shape", + "option": "optional", + "type": "int64[]" + }, + { + "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.", + "name": "extra_shape", + "option": "optional", + "type": "int64[]" + }, + { + "default": false, + "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.", + "name": "input_as_shape", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nThis op fills an output tensor with the data specified by the *value* and *dtype* arguments. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). 
In this case, the *shape* argument should **not** be set.\n\n*Note: Do not set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GivenTensorFill\",\n [],\n [\"out\"],\n values=[1., 2., 3.],\n shape=[3],\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [1. 2. 3.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor with desired dimension filled with specified data. If the shape argument is set, this is the shape specified, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "GeluGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LayerNorm", + "schema": { + "attributes": [ + { + "description": "(int) default to 1; Describes axis of the inputs. Defaults to one because the 0th axis most likely describes the batch size", + "name": "axis", + "option": "optional" + }, + { + "description": "(float) default to 0.001. Small value to be added to the stdev when dividing out by that value. This prevents division by zero.", + "name": "epsilon", + "option": "optional" + }, + { + "description": "(bool) default to False; If true, this op will do affine transformation after normalization.", + "name": "elementwise_affine", + "option": "optional" + } + ], + "description": "\nComputes layer normalization as described in https://arxiv.org/pdf/1607.06450.pdf.\nGiven an input vector x \\in [a_0, a_1, ...,a_{k-1}, a_k, ..., a_{n-1}],\nthis op treats dimensions a_k through a_{n-1} as feature vectors. For each\nfeature vector, the op contains the mean and standard deviation. Then,\nit returns the normalized values (with respect to the feature vector).\n\nNote that this op does not contain the scale an bias terms described in the\npaper. Simply follow this op with an FC op to add those. Concretely, this op\nimplements:\n\nh = \\frac{1}{\\sigma}(a - \\mu)\nwhere \\mu = \\frac{1}{H}\\sum_{i=1}^{H} a_i\nand \\sigma = \\sqrt{\\frac{1}{H}\\sum_{i=1}^{H}(a_i - \\mu)^2}\nwhere H is the number of hidden units (i.e. 
product of dimensions from 'axis'\nto the end.)\n", + "inputs": [ + { + "description": "Input tensor which layer normalization will be applied to", + "name": "input" + }, + { + "description": "scale tensor for elementwise_affine, the shape should be the same as the dimensions of X begin from axis", + "name": "gamma" + }, + { + "description": "bias tensor for elementwise_affine, the shape should be the same as the dimensions of X begin from axis", + "name": "beta" + } + ], + "outputs": [ + { + "description": "Normalized values", + "name": "output" + }, + { + "description": "Mean values for each feature vector", + "name": "mean" + }, + { + "description": "Standard deviations for each feature vector", + "name": "stddev" + } + ], + "support_level": "default" + } + }, + { + "name": "SortedSegmentWeightedSum", + "schema": { + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "description": "\nApplies 'WeightedSum' to each segment of input tensor. Segments need to be sorted and\ncontiguous. See also UnsortedSegmentWeightedSum that doesn't have this requirement.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. 
Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "BernoulliJSD", + "schema": { + "description": "\nComputes the Jensen-Shannon divergence (JSD) between two Bernoulli distributions\nwhere each is parametrized by a single probability.\n", + "inputs": [ + { + "description": "array of probabilities for target", + "name": "T" + } + ], + "outputs": [ + { + "description": "array of JSD losses", + "name": "L" + } + ], + "support_level": "default" + } + }, + { + "name": "Cosh", + "schema": { + "description": "\nCalculates the hyperbolic cosine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cosh_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cosh\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.66423494 0.32074615 0.81523746 0.90423071 0.39275789]\nY: [1.22883528 1.05188156 1.35112322 1.43744212 1.07812598]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The hyperbolic cosine values of the input tensor, computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceFrontWeightedSum", + "schema": { + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "description": "\nReduces the input tensor along the first dimension of the input tensor by\napplying 'WeightedSum'. This op acts in a similar way to SortedSegmentWeightedSum and\nUnsortedSegmentWeightedSum but as if all input slices belong to a single segment.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "UnpackRNNSequence", + "schema": { + "description": "\nThis is the reverse operator for PackRNNSequence. It maps the packed values\nback to sequence values based on the length blob. Each number from length blob\nrepresents the corresponding values that has been grouped. The dimension\nfor each pack is the same as the maximum number from the length blob (padding\nwith zero was implemented for smaller length value). The overall output\ndimension is: M * D, where M is the sum of lengths, and D is the dimension of\neach feature value. 
The following example shows the input and output of\nthis operator:\n\n\nGiven:\n values = [\n [v1, v3, v6, v7],\n [v2, v4, 0, v8],\n [0, v5, 0, 0 ],\n ]\n lengths = [2, 3, 1, 2]\n\n\nOutput:\n output = [v1, v2, v3, v4, v5, v6, v7, v8];\n\n\nOne application for this operator is the transfer data from the format of RNN\nback to sequence values. Note that the gradient operator of\nUnpackRNNSequence is PackRNNSequence.\n", + "inputs": [ + { + "description": "Data tensor, contains the packed features", + "name": "values" + }, + { + "description": "lengths with each number representing the pack size.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output tensor before packing", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "ScaleBlobs", + "schema": { + "attributes": [ + { + "description": "(float, default 1.0) the scale to apply.", + "name": "scale", + "option": "optional" + } + ], + "description": "\nScaleBlobs takes one or more input data (Tensor) and produces one\nor more output data (Tensor) whose value is the input data tensor\nscaled element-wise.\n", + "support_level": "default" + } + }, + { + "name": "SafeEnqueueBlobs", + "schema": { + "description": "\nEnqueue the blobs into queue. When the queue is closed and full, the output\nstatus will be set to true which can be used as exit criteria for execution\nstep.\nThe 1st input is the queue and the last output is the status. 
The rest are\ndata blobs.\n", + "inputs": [ + { + "description": "The shared pointer for the BlobsQueue", + "name": "queue" + } + ], + "support_level": "default" + } + }, + { + "name": "UnsortedSegmentSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Alias", + "schema": { + "description": "\nMakes the output and the input share the same underlying storage.\n\nWARNING: in general, in caffe2's operator interface different tensors should\nhave different underlying storage, which is the assumption made by\ncomponents such as the dependency engine and memory optimization. Thus, in\nnormal situations you should not use the AliasOp, especially in a normal\nforward-backward pass.\n\nThe Alias op is provided so one can achieve true asynchrony, such as\nHogwild, in a graph. But make sure you understand all the implications\nsimilar to multi-thread computation before you use it explicitly.\n", + "inputs": [ + { + "description": "Input tensor whose storage will be shared.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Tensor of same shape as input, sharing its storage.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "ScatterWeightedSum", + "schema": { + "description": "\nSimilar to WeightedSum, computes the weighted sum of several tensors, with\nthe difference that inputs are sliced tensors. The first tensor has to be\nin-place and only slices of it on the first dimension as indexed by INDICES\nwill be updated.\n\nNote: The op pretty much ignores the exact shapes of the input arguments and\ncares only about sizes. It's done for performance consideration to avoid\nunnecessary reshapes. Only first dimension of X_0 is important, let's call it\nN. 
If M is the total size of X_0 and K is the size of INDICES then X_i is\nassumed to be of shape K x (M / N) regardless of the real shape.\n\nNote: Each update in INDICES is applied independently which means that if\nduplicated elements are present in INDICES the corresponding slice of X_0\nwill be scaled multiple times. Manual collapsing of INDICES is required\nbeforehand if necessary.\n\nNote: Updates are applied sequentially by inputs which might have undesired\nconsequences if the input tensor is accessed concurrently by different op\n(e.g. when doing Hogwild). Other threads might see intermediate results even\non individual slice level, e.g. X_0 scaled by weight_0 but without any\nupdates applied.\n\nCurrently only works on CPU because of access to INDICES.\n", + "inputs": [ + { + "description": "Tensor to be updated.", + "name": "X_0" + }, + { + "description": "Scalar weight for X_0, applied only to slices affected.", + "name": "Weight_0" + }, + { + "description": "1-D list of indices on the first dimension of X_0 that need to be updated", + "name": "INDICES" + }, + { + "description": "Update slices, with shape len(INDICES) + shape(X_0)[1:]", + "name": "X_1" + }, + { + "description": "Scalar weight for X_1 update", + "name": "Weight_1" + } + ], + "outputs": [ + { + "description": "Has to be exactly the same tensor as the input 0", + "name": "X_0" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Flatten", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "(Default to 1) Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output", + "name": "axis", + "option": "optional" + } + ], + "description": "\nFlattens the input tensor into a 2D matrix. 
If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn)\n", + "inputs": [ + { + "description": "A Int8 tensor of rank >= axis.", + "name": "input" + } + ], + "outputs": [ + { + "description": "A 2D Int8 tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "LRNGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MaxPool2D", + "schema": { + "description": "MaxPool2D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Softplus", + "schema": { + "description": "\nSoftplus takes one input data tensor $X$ and produces one output data tensor $Y,$ where the softplus function, $y = ln(e^x + 1)$, is applied to $X$ elementwise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softplus_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softplus_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Softplus\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.5380011 0.65190786 0.55673236]\n [-0.16272168 0.5451048 0.30880353]\n [-0.76606876 -0.6238556 -0.40444514]]\n\nY:\n [[0.4598992 1.0713093 1.0097669 ]\n [0.61509246 1.0023911 0.8594219 ]\n [0.38174385 0.42909983 0.5112337 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input data blob to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data blob with same shape as input.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceL2", + "schema": { + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "description": "\nComputes the **L2 norm** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceL2\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[ 8. 0. 2. 5. 1.]\n [ 1. 3. 0. 4. 0.]\n [ 1. 3. 6. 7. 7.]\n [ 6. 9. 8. 4. 6.]\n [ 6. 1. 5. 7. 3.]]\n\n [[ 2. 4. 6. 2. 8.]\n [ 1. 1. 8. 0. 8.]\n [ 5. 9. 0. 3. 2.]\n [ 1. 7. 3. 7. 3.]\n [ 6. 8. 9. 8. 7.]]]]\n\nY:\n[[ 8.24621105 4. 6.3245554 5.38516474 8.06225777]\n [ 1.41421354 3.1622777 8. 4. 8. ]\n [ 5.09901953 9.48683262 6. 7.6157732 7.28010988]\n [ 6.08276272 11.40175438 8.54400349 8.06225777 6.70820379]\n [ 8.48528099 8.06225777 10.29563046 10.63014603 7.6157732 ]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "WallClockTime", + "schema": { + "description": "Time since epoch in nanoseconds.", + "outputs": [ + { + "description": "The time in nanoseconds.", + "name": "time" + } + ], + "support_level": "default" + } + }, + { + "name": "Cos", + "schema": { + "description": "\nCalculates the cosine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cos_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cos\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.6816719 0.76771533 0.933932 0.01404487 0.11862425]\nY: [0.7765203 0.71949923 0.5946774 0.99990135 0.9929724 ]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the cosine of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragePool1D", + "schema": { + "description": "AveragePool1D \nconsumes an input blob and applies average pooling across the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Col2Im", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseLengthsPositionalWeightedSum", + "schema": { + "description": "\nVariation of SparseLengthsWeightedSum operator, where, for each row,\nweights are accessed by indices [0..L-1], where L is the length of given row.\nThis is basically a fused operator of LengthsRangeFill + Gather +\nSparseWeightedSum\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the length of DATA", + "name": "WEIGHT" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Size", + "schema": { + "description": "\nReturn a 1D tensor of type *int64* that contains the number of elements of the input tensor.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Size\",\n [\"X\"],\n [\"size\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"size:\", workspace.FetchBlob(\"size\"))\n\nworkspace.ResetWorkspace()\n\nworkspace.FeedBlob(\"X\", (np.random.rand(6,4)))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"size:\", workspace.FetchBlob(\"size\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[3 7 0]\n [0 1 6]\n [5 0 8]]\nsize: 9\nX:\n[[0.92017884 0.32115368 0.68692035 0.64135016]\n [0.8723328 0.77830265 0.80688656 0.25524236]\n [0.37970216 0.76407047 0.85689564 0.30692883]\n [0.69352573 0.42531502 0.16415212 0.59209324]\n [0.52684188 0.37094846 0.60670079 0.6489272 ]\n [0.94715906 0.34800557 0.61898769 0.28947359]]\nsize: 24\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor to calculate number of elements.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* 1D tensor of type int64 that contains the number of elements in the input tensor *X*.", + "name": "size" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused8BitRowwiseQuantizedToFloat", + "schema": { + "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 32-bit float in the second to the last 4 bytes of each\nrow, followed by the bias as a 32-bit float in the next 4 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + } + }, + { + "name": "NanCheck", + "schema": { + "description": "Identity operator, but checks all values for nan or inf", + "inputs": [ + { + "description": "Tensor to check for nan/inf", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "Tensor to copy input into if no NaNs or inf. 
Can be in-place", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "CbrtGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "RowWiseSparseAdagrad", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nGiven inputs (param, moment, indices, grad, lr), runs a modified sparse Adagrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment), where moment is a 1D tensor with length equal to the number of\nrows in param: shape(moment) == shape(param)[0]. Each element of moment is\napplied to an entire row of param, and the new moment is calculated by adding\nthe average squared sum of gradients across each row. Note that indices must\nalso be a 1D tensor indexing into the rows of param.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment_1" + } + ], + "support_level": "default" + } + }, + { + "name": "GivenTensorDoubleFill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, 
+ { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "LengthsToRanges", + "schema": { + "description": "\nGiven a vector of segment lengths, calculates offsets of each segment and packs\nthem next to the lengths. For the input vector of length N the output is a Nx2\nmatrix with (offset, lengths) packaged for each segment.\n\nFor example, `[1, 3, 0, 2]` transforms into `[[0, 1], [1, 3], [4, 0], [4, 2]]`.\n", + "inputs": [ + { + "description": "1D tensor of int32 segment lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "2D tensor of shape len(lengths) X 2 and the same type as `lengths`", + "name": "ranges" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseSortedSegmentWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CosineEmbeddingCriterion", + "schema": { + "description": "\nCosineEmbeddingCriterion takes two inputs: the similarity value and\nthe label, and computes the elementwise criterion output as\n\n output = 1 - s, if y == 1\n max(0, s - margin), if y == -1\n", + "inputs": [ + { + "description": "The cosine similarity as a 1-dim TensorCPU.", + "name": "S" + }, + { + "description": "The label as a 1-dim TensorCPU with int value of 1 or -1.", + "name": "Y" + } + ], + "outputs": [ + { + "description": "The output loss with the same dimensionality as S.", + "name": "loss" + } + ], + "support_level": "default" + } + }, + { + "name": "IsEmpty", + "schema": { + "description": "\nThe *IsEmpty* op accepts a single input $tensor$, and produces a single boolean output $is\\_empty$. 
The output is *True* if and only if $tensor$ has size == 0.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"IsEmpty\",\n [\"tensor\"],\n [\"is_empty\"],\n)\n\n// Use a not-empty tensor\nworkspace.FeedBlob(\"tensor\", np.random.randn(2, 2).astype(np.float32))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"is_empty: \", workspace.FetchBlob(\"is_empty\"),\"\\n\")\n\n// Use an empty tensor\nworkspace.FeedBlob(\"tensor\", np.empty(0))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"is_empty: \", workspace.FetchBlob(\"is_empty\"))\n\n```\n\n**Result**\n\n```\n\ntensor:\n [[ 0.26018378 0.6778789 ]\n [-1.3097627 -0.40083608]]\nis_empty: False\n\ntensor:\n []\nis_empty: True\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input data tensor to check if empty.", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "Output scalar boolean tensor. True if input has size == 0.", + "name": "is_empty" + } + ], + "support_level": "default" + } + }, + { + "name": "KeySplit", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LC1D", + "schema": { + "description": "\nThe locally connected operator consumes an input vector, a 1D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. 
As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "description": null, + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "GenerateProposalsCPP", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "XavierFill", + "schema": { + "attributes": [ + { + "description": "Desired shape of the *output* tensor.", + "name": "shape", + "option": "optional", + "type": "int64[]" + }, + { + "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.", + "name": "extra_shape", + "option": "optional", + "type": "int64[]" + }, + { + "default": false, + "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.", + "name": "input_as_shape", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nThis op fills an output tensor with values sampled from a uniform distribution with the range determined by the desired shape of the output. 
Rather than specifying the range of values manually, the novelty of Xavier Fill is that it automatically scales the range of the distribution it draws from based on the size of the desired output tensor. For more information check out the paper [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf). The output tensor shape is specified by the *shape* argument. However, if *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\n*Note: Do not set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"XavierFill\",\n [],\n [\"out\"],\n shape=[3,3],\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [[-0.8412168 0.33207083 -0.88418937]\n [ 0.43059897 -0.8340702 0.07781601]\n [ 0.93261135 -0.24542928 -0.3980782 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor of random values drawn from an automatically scaled uniform distribution, based on the size of the output tensor. If the shape argument is set, this is the shape specified by the shape argument, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "QuantDecode", + "schema": { + "description": "\nDecode inputs using codebook. This is a general LUT operator that returns\ntensors with values from codebook (input 0) based on given indices in\ncodes (input 1 ~ n).\n\n\nExample:\n\n\nInput:\n codebook = [1.5, 2.5, 3.5]\n codes_0 = [0, 1, 1, 2]\n codes_1 = [2, 0, 0]\n\n\nOutput:\n decoded_0 = [1.5, 2.5, 2.5, 3.5]\n decoded_1 = [3.5, 1.5, 1.5]\n", + "inputs": [ + { + "description": "Codebook in 1d tensor (float)", + "name": "codebook" + }, + { + "description": "Encoded codes 0 (uint8/uint16/int32)", + "name": "codes_0" + }, + { + "description": "Encoded codes 1 if existed (uint8/uint16/int32)", + "name": "codes_1" + }, + { + "description": "Encoded codes n if existed (uint8/uint16/int32)", + "name": "codes_n" + } + ], + "outputs": [ + { + "description": "Decoded tensor for codes_0 (float)", + "name": "decoded_0" + }, + { + "description": "Decoded tensor for codes_1 (float)", + "name": "decoded_1" + }, + { + "description": "Decoded tensor for codes_n (float)", + "name": "decoded_n" + } + ], + "support_level": "default" + } + }, + { + "name": "ElementwiseLinearGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "TimerGetAndEnd", + "schema": { + "description": "\nQueries the current time of a timer in nanos, stops the timer publishing a CAFFE_EVENT.\n\nGithub Links:\n- 
https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\ntimerbegin_op = core.CreateOperator(\n \"TimerBegin\",\n [],\n [\"timer\"]\n)\n\ntimerget_op = core.CreateOperator(\n \"TimerGet\",\n [\"timer\"],\n [\"nanos\"]\n)\n\ntimerend_op = core.CreateOperator(\n \"TimerEnd\",\n [\"timer\"],\n []\n)\n\ntimergetandend_op = core.CreateOperator(\n \"TimerGetAndEnd\",\n [\"timer\"],\n [\"nanos\"]\n)\n\n// Test TimerBegin/TimerGet/TimerEnd\nworkspace.RunOperatorOnce(timerbegin_op)\nprint(\"timer:\", workspace.FetchBlob(\"timer\"))\nworkspace.RunOperatorOnce(timerget_op)\nprint(\"nanos:\", workspace.FetchBlob(\"nanos\"))\nworkspace.RunOperatorOnce(timerend_op)\n\n\n// Test TimerBegin/TimerGetAndEnd\nworkspace.RunOperatorOnce(timerbegin_op)\nprint(\"timer:\", workspace.FetchBlob(\"timer\"))\nworkspace.RunOperatorOnce(timergetandend_op)\nprint(\"nanos:\", workspace.FetchBlob(\"nanos\"))\n\n```\n\n**Result**\n\n```\n\ntimer: b'timer, a C++ native class of type caffe2::TimerInstance*.'\nnanos: 361140\ntimer: b'timer, a C++ native class of type caffe2::TimerInstance*.'\nnanos: [252250]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): pointer to a timer object; obtained from **TimerBegin** op", + "name": "timer" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): scalar tensor containing time in nanoseconds", + "name": "nanos" + } + ], + "support_level": "default" + } + }, + { + "name": "DivGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceMaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Do", + "schema": { + "attributes": [ + { + "description": "Subnet with blob bindings", + "name": "net", + "option": "optional" + }, + { + "description": "List of inner net blob names to bind to outer workspace", + "name": "inner_blobs", + "option": "optional" + }, + { + "description": "Indices of corresponding outer workspace blobs, in order: operator inputs, operator outputs (skipping workspace blobs)", + "name": "outer_blobs_idx", + "option": "optional" + }, + { + "description": "List of blobs from the forward Do operator workspace needed in backward pass, used in gradient Do operator", + "name": "saved_fwd_blobs", + "option": "optional" + }, + { + "description": "Whether to reuse workspace or create a new one in a given scope", + "name": "reuse_workspace", + "option": "optional" + } + ], + "description": "\n'Do' control operator, executes a subnet in a separate workspace.\nLast blobs in the input and output lists should be the same blob created with\nCreateScope op. 
Arguments 'inner_blobs' and 'outer_blobs_idx' provide a mapping\nbetween selected inner blob names and corresponding outer blob indices.\n ", + "support_level": "default" + } + }, + { + "name": "DotProductWithPaddingGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "UniqueUniformFill", + "schema": { + "attributes": [ + { + "description": "Minimum value, inclusive", + "name": "min", + "option": "optional" + }, + { + "description": "Maximum value, inclusive", + "name": "max", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor.Strictly must be one of the types from DataType enum in TensorProto.This only supports INT32 and INT64 now. If not set, assume INT32", + "name": "dtype", + "option": "optional" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob. Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": "\nFill the output tensor with uniform samples between min and max (inclusive).\nIf the second input is given, its elements will be excluded from uniform\nsampling. Using the second input will require you to provide shape via the first\ninput.\n", + "inputs": [ + { + "description": "Input tensor to provide shape information", + "name": "input" + }, + { + "description": "(optional) Avoid elements in this tensor. 
Elements must be unique.", + "name": "avoid" + } + ], + "outputs": [ + { + "description": "Output tensor of unique uniform samples", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "LongIndexCreate", + "schema": { + "attributes": [ + { + "description": "Max number of elements, including the zero entry.", + "name": "max_elements", + "option": "optional" + } + ], + "description": "\nCreates a dictionary that maps int64 keys to consecutive integers\nfrom 1 to max_elements. Zero is reserved for unknown keys.\n", + "outputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handler" + } + ], + "support_level": "default" + } + }, + { + "name": "ComputeOffset", + "schema": { + "description": "\nCompute the offsets matrix given cursor and data blobs. Needs to be run at\nthe beginning or after resetting the cursor\n\nInput(0) is a blob pointing to a TreeCursor, and\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nComputeOffset is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing offset info for this chunk.", + "name": "field_0" + } + ], + "support_level": "default" + } + }, + { + "name": "ByteWeightDequant", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CopyOnDeviceLike", + "schema": { + "description": "Copy input tensor into output to the specific device.", + "inputs": [ + { + "description": "The input tensor.", + "name": "input" + }, + { + "description": "Tensor, on which device the copy will be performed.", + "name": "dst" + } + ], + "outputs": [ + { + "description": "Tensor that will contain a copy of the input.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchOneHot", + "schema": { + 
"description": "\nInput is a matrix tensor. Its first dimension is the batch\nsize. Expand each column of it using one hot encoding. The `lengths` specifies\nthe size of each column after encoding, and the `values` is the dictionary value\nof one-hot encoding for each column. For example\n\n If data = [[2, 3], [4, 1], [2, 5]], lengths = [2, 3],\n and values = [2, 4, 1, 3, 5], then\n\n output = [[1, 0, 0, 1, 0], [0, 1, 1, 0, 0], [1, 0, 0, 0, 1]]\n", + "inputs": [ + { + "description": "input tensor matrix", + "name": "data" + }, + { + "description": "the size is the same as the width of the `data`", + "name": "lengths" + }, + { + "description": "one hot encoding dictionary values", + "name": "values" + } + ], + "outputs": [ + { + "description": "output matrix that expands each input column with one hot encoding", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "DropoutGrad", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MulGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MarginRankingCriterionGradient", + "schema": { + "description": "\nMarginRankingCriterionGradient takes both X1, X2, Y and dY and\nuses them to update dX1, and dX2 according to the chain rule\nand derivatives of the loss function.\n", + "support_level": "default" + } + }, + { + "name": "CreateMutex", + "schema": { + "description": "Creates an unlocked mutex and returns it in a unique_ptr blob.", + "outputs": [ + { + "description": "Blob containing a std::unique_ptr.", + "name": "mutex_ptr" + } + ], + "support_level": "default" + } + }, + { + "name": "Float16UniformFill", + "schema": { + "attributes": [ + { + "description": "Shape of the tensor", + "name": "shape", + "option": "optional" + }, + { + "description": "Minimim value to generate", + "name": "min", + "option": "optional" + }, + { + "description": "Maximum value to generate", + "name": "max", + "option": "optional" + 
} + ], + "description": "Fills a half float tensor of a specified shape with values from a uniform distribution[min,max]", + "support_level": "default" + } + }, + { + "name": "SparseAdadelta", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default 0.95, the squared gradient sum is decayed by this factor.", + "name": "decay", + "option": "optional" + } + ], + "description": "\n\nGiven inputs (param, moment, moment_delta, indices, grad, lr),\nruns the dense AdaDelta update on (param, grad, moment[indices],\n moment_delta[indices], lr), and returns (new_param, new_moment,\n new_moment_delta) as in the dense case.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Average of squared gradients", + "name": "moment" + }, + { + "description": "Average of squared parameter updates", + "name": "moment_delta" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated average squared gradient", + "name": "output_moment" + }, + { + "description": "Updated average of squared parameter updates", + "name": "output_moment_delta" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToRowwiseQuantized8Bits", + "schema": { + "description": "\nThis operator applies 8Bit row-wise quantization to\ninput tensor and returns quantized tensor. Row wise quantization of\ninput tensor is the following process. We take tensor of size\n(m_1, m_2,...,m_n), n >= 2, reshape it into matrix of size\n(m_1, m_2 x... x m_n) and apply row-wise quantization. 
After this,\nwe compute scale_i= (max_i - min_i) / 255 and bias_i = min_i for\ni-th row r_i of reshaped matrix, where min_i and max_i -- minimum\nand maximum elements of i-th row, and quantize each element r_{ij} as\n0 <= round((r_ij - bias_i) / scale_i) < 256. Instead of input tensor\nwe obtain uint8 tensor and auxiliary information as scale and bias to\nrestore input tensor (with losses).\n", + "inputs": [ + { + "description": "input", + "name": "input" + } + ], + "outputs": [ + { + "description": "quantized_input", + "name": "quantized_input" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i", + "name": "scale_bias" + } + ], + "support_level": "default" + } + }, + { + "name": "SumRelu", + "schema": { + "description": null, + "inputs": [ + { + "description": "First of the input tensors. Can be inplace.", + "name": "data_0" + } + ], + "outputs": [ + { + "description": "Output tensor. Same dimension as inputs.", + "name": "sum" + } + ], + "support_level": "default" + } + }, + { + "name": "LSTMUnitGradient", + "schema": { + "attributes": [ + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "AveragePool3DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeMean", + "schema": { + "description": "\nApplies 'Mean' to each segment of input tensor. In order to allow for more\nefficient implementation of 'Mean', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. 
Other dimensions are inherited from the input tensor.\n\nMean computation is done element-wise, so that each element of the output slice corresponds to the average value of the respective elements in the input slices. Operation doesn't change the shape of individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimentsions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "DiagonalFill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "value", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor.Strictly must be one of the types from DataType enum in TensorProto.", + "name": "dtype", + "option": "optional" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": "\nThe operator fills the diagonal elements of the output tensor (>= 2D)\nwith a constant value specified by the 'value' argument, and others 0. 
If\nnumber of dimensions of the output tensor is greater than 2, all dimensions\nmust be equal.\n\nThe data type is specified by the 'dtype' argument. The 'dtype' argument must\nbe one of the data types specified in the 'DataType' enum field in the\nTensorProto message. If the 'dtype' argument is not provided, the data type of\n'value' is used.\n\nThe output tensor shape is specified by the 'shape' argument. If the number of\ninput is 1, the shape will be identical to that of the input at run time with\noptional additional dimensions appended at the end as specified by 'extra_shape'\nargument. In that case the 'shape' argument should not be set.\n\nIf input_as_shape is set to true, then the input should be a 1D tensor\ncontaining the desired output shape (the dimensions specified in extra_shape\nwill also be appended)\n\nNOTE: Currently, it supports data type of float, int32, int64, and bool.\n", + "inputs": [ + { + "description": "Input tensor (optional) to provide shape information.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensorargument and its type is specified by the 'dtype' argument", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "ConcatTensorVector", + "schema": { + "description": "\nConcat Tensors in the std::unique_ptr >\nalong the first dimension.\n ", + "inputs": [ + { + "description": "std::unique_ptr >", + "name": "vector of Tensor" + } + ], + "outputs": [ + { + "description": "tensor after concatenating", + "name": "tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "Conv2D", + "schema": { + "description": "\nThe convolution operator consumes an input vector, a 2D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. 
Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n// Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n// Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n// Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n// Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n// Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "MultiClassAccuracy", + "schema": { + "description": "\nRespectively compute accuracy score for each class given a number of instances\nand predicted scores of each class for each instance.\n", + "inputs": [ + { + "description": "2-D float tensor (N,D,) of predicted scores of each class for each data. N is the number of instances, i.e., batch size. D is number of possible classes/labels.", + "name": "prediction" + }, + { + "description": "1-D int tensor (N,) of labels for each instance.", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D float tensor (D,) of accuracy for each class. If a class has no instance in the batch, its accuracy score is set to zero.", + "name": "accuracies" + }, + { + "description": "1-D int tensor (D,) of number of instances for each class in the batch.", + "name": "amounts" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceL2Gradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseUnsortedSegmentSum", + "schema": { + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Sum' to each segment. 
Segments ids can appear in arbitrary order (unlike in\nSparseSortedSegmentSum).\n\nThis op is basically Gather and UnsortedSegmentSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. Other output dimensions are inherited from the input\ntensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Integer vector with the same length as INDICES that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. 
Has the first dimension of equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceFrontMax", + "schema": { + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "description": "\nReduces the input tensor along the last dimension of the by applying **max**.\n\nCan reduce more than one of the \"first\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the max operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [max(1,4), max(5,1,7), max(2), max(9,2)] = [4, 7, 2, 9]$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_max_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceFrontMax\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[2. 8. 1.]\n [9. 6. 6.]\n [7. 7. 0.]]\n\n [[4. 3. 9.]\n [9. 2. 7.]\n [6. 4. 7.]]]\nY: [9. 8. 9.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ExpandDims", + "schema": { + "attributes": [ + { + "description": "List of dimensions of *data* to add single dimensional entry.", + "name": "dims", + "option": "optional", + "type": "int64[]" + } + ], + "description": "\nThe *ExpandDims* op inserts single-dimensional entries into the shape of the input tensor *data,* and produces a single output tensor *expanded*. The op also takes an argument *dims* with a list of dimensions for where to add the single dimensional entries. If the same blob is provided as input and output, the operation is copy-free. This is the exact inverse operation of *Squeeze*.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ExpandDims\",\n [\"data\"],\n [\"expanded\"],\n dims=[0,1],\n)\n\nworkspace.FeedBlob(\"data\", np.zeros((100,100)).astype(np.float32))\nprint(\"data.shape:\", workspace.FetchBlob(\"data\").shape)\n\nworkspace.RunOperatorOnce(op)\nprint(\"expanded.shape:\", workspace.FetchBlob(\"expanded\").shape)\n\n```\n\n**Result**\n\n```\n\ndata.shape: (100, 100)\nexpanded.shape: (1, 1, 100, 100)\n\n```\n\n
\n\n\n\n", + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "expanded" + } + ], + "support_level": "default" + } + }, + { + "name": "RowMul", + "schema": { + "description": "\nGiven a matrix A and column vector w, the output is the multiplication of row i\nof A and element i of w, e.g. C[i][j] = A[i][j] * w[i]. This operator should be\ndeprecated when the gradient operator of Mul with broadcast is implemented.\n", + "inputs": [ + { + "description": "The matrix", + "name": "mat" + }, + { + "description": "The column vector", + "name": "w" + } + ], + "outputs": [ + { + "description": "Output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchMoments", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "IsMemberOf", + "schema": { + "attributes": [ + { + "description": "List of values to check for membership.", + "name": "value", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor. Strictly must be one of the types from DataType enum in TensorProto.", + "name": "dtype", + "option": "optional" + } + ], + "description": "\nThe *IsMemberOf* op takes an input tensor *X* and a list of values as argument, and produces one output data tensor *Y*. The output tensor is the same shape as *X* and contains booleans. The output is calculated as the function *f(x) = x in value* and is applied to *X* elementwise.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/elementwise_logical_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/elementwise_logical_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"IsMemberOf\",\n [\"X\"],\n [\"Y\"],\n value=[0,2,4,6,8],\n)\n\n// Use a not-empty tensor\nworkspace.FeedBlob(\"X\", np.array([0,1,2,3,4,5,6,7,8]).astype(np.int32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y: \\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n// value=[0,2,4,6,8]\n\nX:\n [0 1 2 3 4 5 6 7 8]\nY:\n [ True False True False True False True False True]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor of any shape", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor (same size as X containing booleans)", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "MinGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "RangeFill", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReluN", + "schema": { + "attributes": [ + { + "description": "the cap of output", + "name": "n", + "option": "optional" + } + ], + "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = min(max(0, x), n),\nis applied to the tensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "TanhGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CubeGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceTailSum", + "schema": { + "description": "\nReduce the tailing dimensions\n", + "inputs": [ + { + "description": "The matrix", + "name": "mat" + } + ], + "outputs": [ + { + "description": "Output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "GroupNormGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Moments", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. 
If axes is not provided, the op computes the element-wise mean and variance.", + "name": "axes", + "option": "optional" + }, + { + "description": "Keep the reduced dimension(s) or not, default True keeps the reduced dimension(s).", + "name": "keepdims", + "option": "optional" + } + ], + "description": "\n Computes the mean and variance of the input tensor's element along the\n provided axes. The resulted tensor has the same rank as the input if keepdims\n equals True.\n If keepdims equals False, then the resulted tensor have the reduced dimension\n pruned.\n", + "inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reduced mean tensor.", + "name": "mean" + }, + { + "description": "Reduced variance tensor.", + "name": "variance" + } + ], + "support_level": "default" + } + }, + { + "name": "ATen", + "schema": { + "description": null, + "support_level": "contribution" + } + }, + { + "name": "LC1DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LengthsToSegmentIds", + "schema": { + "description": "\nGiven a vector of segment lengths (*lengths*) the *LengthsToSegmentIds* op returns a zero-based, consecutive vector of segment ids (*segment_ids*). For example, *lengths=[1, 3, 0, 2]* will produce *segment_ids=[0, 1, 1, 1, 3, 3]*. In general, the inverse operation is *SegmentIdsToLengths*. Notice though that trailing empty sequence lengths can't be properly recovered from segment ids.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsToSegmentIds\",\n [\"lengths\"],\n [\"segment_ids\"],\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([1, 3, 0, 2]).astype(np.int32))\nprint(\"lengths:\\n\", workspace.FetchBlob(\"lengths\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"segment_ids: \\n\", workspace.FetchBlob(\"segment_ids\"))\n\n```\n\n**Result**\n\n```\n\nlengths:\n [1 3 0 2]\nsegment_ids:\n [0 1 1 1 3 3]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1D tensor of int32 or int64 segment lengths.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1D tensor of length *sum(lengths)*", + "name": "segment_ids" + } + ], + "support_level": "default" + } + }, + { + "name": "Wngrad", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nComputes the WnGrad update for an input gradient and accumulated\nhistory. This operator implement the optimization algorithm\nin https://arxiv.org/abs/1803.02865 by Wu, Ward and Bottou.\nConcretely, given inputs (param, grad, seq_b, learning_rate),\ncomputes\n\n new_seq_b = seq_b + 1 / seq_b * norm(grad)^2\n effective_lr = learning_rate / (new_seq_b + epsilon)\n update = learning_rate * grad / (new_seq_b + epsilon)\n new_param = param + update\nand returns (new_param, new_seq_b).\n\nOptionally returns effective_lr and update as well.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Seq_b history", + "name": "seq_b" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated seq_b", + "name": "output_seq_b" + }, + { + "description": "(optional) Effective learning rate", + "name": "output_effective_lr" + }, + { + "description": "(optional) Actual update that is applied.", + "name": "output_update" + } + ], + "support_level": "default" + } + }, + { + "name": "Or", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. 
If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise logical operation **or** (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Or\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", (np.random.rand(3, 3) > 0.5))\nworkspace.FeedBlob(\"B\", (np.random.rand(3, 3) > 0.5))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[False True True]\n [False True True]\n [ True True True]]\nB:\n[[False True False]\n [ True True True]\n [False True False]]\nC:\n[[False True True]\n [ True True True]\n [ True True True]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of booleans. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "EQ", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise equal to comparison **==** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"EQ\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True False False True True False]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "ErfGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ChannelBackpropStats", + "schema": { + "description": "\nGiven an input tensor in NCHW format, the gradient for the output of SpatialBN\nand the per-channel mean and inverse std var vectors for the input, computes the\nper-channel bias and scale gradient to be used during the backward pass for\nsubsequent spatial batch normalization gradient calculation. Typically, the\nresults of this op are subsequently reduced over multiple devices to obtain\nstatistics over a larger batch size in cases where the batch size for a single\nmodel copy is too low to yield the full benefit of batch normalization. 
The\nresulting bias and scale can then be plugged back into SpatialBNGradient to get\nresults over the larger batch size ", + "inputs": [ + { + "description": "The input 4-dimensional tensor of shape NCHW", + "name": "X" + }, + { + "description": "The mean saved from the forward pass as a 1-dimensional tensor of size C.", + "name": "mean" + }, + { + "description": "The saved inverse standard deviation as a 1-dimensional tensor of size C.", + "name": "inv_std" + }, + { + "description": "Gradient for the output layer of SpatialBN, here used as input because we are on the backward pass", + "name": "output_grad" + } + ], + "outputs": [ + { + "description": "Gradient for the scale vector", + "name": "scale_grad" + }, + { + "description": "Gradient for the bias vector", + "name": "bias_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "GatherRangesToDense", + "schema": { + "attributes": [ + { + "description": "Expected lengths for ranges", + "name": "lengths", + "option": "optional" + }, + { + "description": "The number of observations needed before deciding that the ratio of mismatched ranges is alarming, also determines whether an info sumarizing the empty and mismatch ratio will be printed at the end.", + "name": "min_observation", + "option": "optional" + }, + { + "description": "An error is raised when ratio of empty ranges exceeds this (default is 1, which means by default no error will be triggered).", + "name": "max_empty_ratio", + "option": "optional" + }, + { + "description": "An error is raised when ratio of mismatched ranges exceeds this.", + "name": "max_mismatched_ratio", + "option": "optional" + }, + { + "description": "A log is recorded only after an error is triggered every n times.", + "name": "log_every_n", + "option": "optional" + } + ], + "description": "\nGiven DATA tensor of rank 1, and RANGES tensor of rank 3, gather values\ncorresponding to each range into a separate output tensor. 
If the optional input\nKEY tensor is also given, the output will be sorted by KEY for each example.\n\nRANGES dimensions description:\n1: represents list of examples within a batch\n2: represents list features\n3: two values which are start and length or a range (to be applied on DATA)\n\nEach feature has fixed lengths which are passed as lengths argument and a\nseparate tensor will be produced for each feature.\ni.e. DATA.dim(1) = len(lengths) = NumOuptuts.\n\nMissing features (represented by empty ranges) filled with default_value.\n\nExample 1:\n DATA = [1, 2, 3, 4, 5, 6, 7, 8]\n RANGES = [\n [\n [2, 4],\n [0, 2],\n ],\n [\n [0, 0],\n [6, 2],\n ]\n ]\n lengths = [4, 2]\n OUTPUT[0] = [[3, 4, 5, 6], [0, 0, 0, 0]]\n OUTPUT[1] = [[1, 2], [7, 8]]\n\nExample 2 (with KEY):\nDATA = [1, 2, 3, 4, 5, 6, 7, 8]\nKEY = [0, 1, 3, 2, 1, 0, 1, 0]\nRANGES = [\n [\n [2, 4],\n [0, 2],\n ],\n [\n [0, 0],\n [6, 2],\n ]\n]\nlengths = [4, 2]\nOUTPUT[0] = [[6, 5, 4, 3], [0, 0, 0, 0]]\nOUTPUT[1] = [[1, 2], [8, 7]]\n\nContrast Example 2 with Example 1. For each data point per feature, the values\nare sorted by the corresponding KEY.\n", + "inputs": [ + { + "description": "Tensor of rank 1.", + "name": "DATA" + }, + { + "description": "Tensor of int32/int64 ranges, of dims (N, M, 2). Where N is number of examples and M is a size of each example. Last dimension represents a range in the format (start, lengths)", + "name": "RANGES" + }, + { + "description": "Tensor of rank 1 and type int64.", + "name": "KEY" + } + ], + "outputs": [ + { + "description": "1-D tensor of size sum of range lengths", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "LambdaRankNdcg", + "schema": { + "description": "\nIt implements the LambdaRank as appeared in Wu, Qiang, et al. 
\"Adapting boosting\nfor information retrieval measures.\" Information Retrieval 13.3 (2010): 254-270.\n\nThis method heuristically optimizes the NDCG.\n", + "support_level": "default" + } + }, + { + "name": "TileGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ResetCursor", + "schema": { + "description": "\nResets the offsets for the given TreeCursor. This operation is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + } + ], + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeMax", + "schema": { + "description": "\nApplies 'Max' to each segment of input tensor. In order to allow for more\nefficient implementation of 'Max', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nMax computation is done element-wise, so that each element of the output slice corresponds to the max value of the respective elements in the input slices. Operation doesn't change the shape of individual blocks. This implementation imitates torch nn.Max operator. If the maximum value occurs more than once, the operator will return the first occurrence of value. 
When computing the gradient using the backward propagation, the gradient input corresponding to the first occurrence of the maximum value will be used.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimentsions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "PairWiseLoss", + "schema": { + "description": "\nOperator computes the pair wise loss between all pairs within a batch\n using the logit loss function on the difference in scores between pairs\n", + "inputs": [ + { + "description": "Input blob from the previous layer, which is almost always the result of a softmax operation; X is a 2D array of size N x 1where N is the batch size. For more info: D. Sculley, Large Scale Learning to Rank. https://www.eecs.tufts.edu/~dsculley/papers/large-scale-rank.pdf", + "name": "X" + }, + { + "description": "Blob containing the labels used to compare the input", + "name": "label" + }, + { + "description": "Optional input blob that contains the lengthsof multiple sessions. The summation of this blob must be equalto the size of blob X. 
If lengths blob is provided, the outputblob has the same size as lengths blob, and the cross entropyis computed within each session.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output blob after the cross entropy computation", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "NHWC2NCHW", + "schema": { + "description": "\nThe operator switches the order of data in a tensor from NHWC- sample index N,\nheight H, width H and channels C, to the NCHW order (this is for 2D images).\nIn general, this operator switches the order of data in a tensor from N H_1 ...\nH_k C to N C H_1 ... H_k for k-dimensional features, and currently supports\nk=1, 2, and 3.\n", + "inputs": [ + { + "description": "The input data (Tensor) in the NHWC order.", + "name": "data" + } + ], + "outputs": [ + { + "description": "The output tensor (Tensor) in the NCHW order.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateRebatchingQueue", + "schema": { + "attributes": [ + { + "description": "Number of input tensors the queue will support", + "name": "num_blobs", + "option": "optional" + }, + { + "description": "Maximal number of elements the queue can hold at any given point", + "name": "capacity", + "option": "optional" + } + ], + "description": "\nCreates the Queue.\n", + "outputs": [ + { + "description": "object representing the queue", + "name": "queue" + } + ], + "support_level": "default" + } + }, + { + "name": "GE", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. 
If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise greater or equal than comparison **>=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True True False True True False]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "SinusoidPositionEncoding", + "schema": { + "attributes": [ + { + "description": "Desired embedding size/number of dimensions -- defaults to 100", + "name": "embedding_size", + "option": "optional" + }, + { + "description": "Sinusoid tuning parameter -- defaults to 10000", + "name": "alpha", + "option": "optional" + }, + { + "description": "Amplitude of Sin/Cos output", + "name": "amplitude", + "option": "optional" + } + ], + "description": "\nCalculates a sinusoid position encoding tensor as described\nin https://arxiv.org/abs/1706.03762. Takes a 2-D tensor\n(of size M x K) of positions as input, the embedding size\nas an argument, and outputs a position encoding tensor of\nsize (M x K x embedding_size). Here M is typically the max\nsequence length and K is typically the batch size.\nThe input tensor must satisfy input[m, 0] == input[m, k] for all k.\n\nEncoded as amplitude * SIN(pos/alpha^(i/embedding_size)) if i is even,\nelse amplitude * COS(pos/alpha^(i/embedding_size)). 
Here, pos is the position,\nalpha and amplitude are tuning parameters, i is the current dimension for\nthe embedding, and embedding_size is the number of total dimensions in\nthe embedding.\n", + "inputs": [ + { + "description": "2-D tensor of positions to be encoded", + "name": "positions" + } + ], + "outputs": [ + { + "description": "3-D tensor representing the positional encoding", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeMaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "RoIAlignGradient", + "schema": { + "description": null, + "inputs": [ + { + "description": "See RoIPoolF.", + "name": "X" + }, + { + "description": "See RoIPoolF.", + "name": "RoIs" + }, + { + "description": "Gradient of forward output 0 (Y)", + "name": "dY" + } + ], + "outputs": [ + { + "description": "Gradient of forward input 0 (X)", + "name": "dX" + } + ], + "support_level": "default" + } + }, + { + "name": "UnsortedSegmentWeightedSum", + "schema": { + "attributes": [ + { + "description": "Optional int argument specifying the number of output segments and thus the first dimension of the output", + "name": "num_segments", + "option": "optional" + }, + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "description": "\nApplies 'WeightedSum' to each segment of input tensor. Segments ids can appear in\narbitrary order (unlike in SortedSegmentWeightedSum).\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. 
Other output dimensions are inherited from the input\ntensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector with the same length as the first dimension of DATA that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchMatMul", + "schema": { + "attributes": [ + { + "description": "Pass 1 to transpose the last two dimensions of A before doing multiplication", + "name": "trans_a", + "option": "optional" + }, + { + "description": "Pass 1 to transpose the last two dimensions of B before doing multiplication", + "name": "trans_b", + "option": "optional" + }, + { + "description": "Pass 1 to allow broadcasting of dimensions. Behavior is the same as numpy.matmul. Gradient is currently not supported when running in broadcast mode.", + "name": "broadcast", + "option": "optional" + } + ], + "description": "\nBatch Matrix multiplication Yi = Ai * Bi, where A has shape (dim0, dim1, ... M, K),\nB has shape (dim0, dim1, ... K, N), Y has shape (dim0, dim1, ... M, N) and i ranges\nfrom 0 to (dim0 * dim1 ...) - 1. rank(A) == rank(B) >= 2. In case of A and B being\ntwo dimensional, it behaves like normal matrix multiplication.\n", + "inputs": [ + { + "description": "tensor of shape (dim0, dim1 ... M, K)", + "name": "A" + }, + { + "description": "tensor of shape (dim0, dim1 ... K, N)", + "name": "B" + } + ], + "outputs": [ + { + "description": "tensor of shape (dim0, dim1 ... 
M, N)", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LpNormGradient", + "schema": { + "attributes": [ + { + "description": "Order of the norm in p-norm", + "name": "p", + "option": "optional" + }, + { + "description": "whehther we calculate norm or averaged_norm.The Lp_averaged_norm(x) is defined asLp_averaged_normgradient(x) = LpNormGradient(x) / size(x)", + "name": "average", + "option": "optional" + } + ], + "description": "\nGiven one input float tensor X, derivative dout, and produces one output\nfloat tensor dX. dX is the derivative of the Lp norm of tensor X, computed as\ndx = d(sum over |x^p|)/dx, in which p is either 1 or 2(currently only\nsupports l1 and l2 norm) determined by the argument p.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + }, + { + "description": "1D input tensor", + "name": "dout" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "dx" + } + ], + "support_level": "default" + } + }, + { + "name": "DotProduct", + "schema": { + "description": "\nComputes and outputs the dot product of the two input float tensors `X` and `Y`.\nNote that `X` and `Y` must be either 1D or 2D, and they must be the same shape.\nThe output tensor is 1D, which represents either the product of each element in\na respective dimension if the inputs are 1D, or the sum of the products in a\ngiven dimension if the inputs are 2D matrices. 
Note that the actual dot product\nis a scalar value, which is effectively the sum of the elements in the 1D\noutput tensor.\n\nFor 1D inputs:\nGiven two vectors $X = [x_0, x_1, x_2]$ and $Y = [y_0, y_1, y_2]$; $Z = [x_0 * y_0, x_1 * y_1, x_2 * y_2]$\n\nFor 2D inputs:\nGiven two matrices:\n$$X = [[x_0^0, x_1^0, x_2^0], \\\\ [x_0^1, x_1^1, x_2^1], \\\\ [x_0^2, x_1^2, x_2^2], \\\\ ..., \\\\ [x_0^n, x_1^n, x_2^n]]$$\n\nand\n\n$$Y = [[y_0^0, y_1^0, y_2^0], \\\\ [y_0^1, y_1^1, y_2^1], \\\\ [y_0^2, y_1^2, y_2^2], \\\\ ..., \\\\ [y_0^n, y_1^n, y_2^n]]$$\n\nthen\n\n$$Z = \\biggl[\\Big((x_0^0 * y_0^0) + (x_1^0 * y_1^0) + (x_2^0 * y_2^0)\\Big), \\\\ \\Big((x_0^1 * y_0^1) + (x_1^1 * y_1^1) + (x_2^1 * y_2^1)\\Big), \\\\ \\Big((x_0^2 * y_0^2) + (x_1^2 * y_1^2) + (x_2^2 * y_2^2)\\Big), \\\\ ..., \\\\ \\Big((x_0^n * y_0^n) + (x_1^n * y_1^n) + (x_2^n * y_2^n)\\Big)\\biggr]$$\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"DotProduct\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(20, size=(5)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", np.random.randint(20, size=(5)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Z:\\n\", workspace.FetchBlob(\"X\"))\n\n\nworkspace.ResetWorkspace()\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [ 2. 15. 2. 7. 12.]\nY:\n [ 3. 12. 9. 3. 18.]\nZ:\n [ 2. 15. 2. 7. 12.]\nX:\n [[2. 0. 4.]\n [7. 7. 4.]\n [7. 9. 9.]]\nY:\n [[2. 0. 8.]\n [9. 6. 1.]\n [7. 8. 0.]]\nZ:\n [ 36. 109. 121.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* 1D or 2D input tensor.", + "name": "X" + }, + { + "description": "*(type: Tensor``)* 1D or 2D input tensor (must have the same shape as X).", + "name": "Y" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* 1D output tensor.", + "name": "Z" + } + ], + "support_level": "default" + } + }, + { + "name": "NGramFromCategorical", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Int8Slice", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "List of starting indices", + "name": "starts", + "option": "optional" + }, + { + "description": "List of ending indices", + "name": "ends", + "option": "optional" + }, + { + "description": "(Optional) The dimension to slice over. If specified start_idx and end_idx should also be given and it takes precedence over starts and ends", + "name": "dim", + "option": "optional" + }, + { + "description": "(Optional) The dimension to start slice from. Default is 0", + "name": "start_idx", + "option": "optional" + }, + { + "description": "(Optional) The dimension to end the slice. Default is -1", + "name": "end_idx", + "option": "optional" + } + ], + "description": "\nProduces a slice of the input Int8 tensor. Currently, only slicing in a single\ndimension is supported.\nSlices are passed as 2 1D vectors or as two keyword argument lists with starting\nand end indices for each dimension of the input `data` tensor. If a negative\nvalue is passed for any of the start or end indices, it represents the number of\nelements before the end of that dimension. 
End indices are non-inclusive unless\nnegative (end index -1 means up to and including the last element).\n\n\nExample:\n\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 3]\n\n result = [\n [2, 3],\n [6, 7],\n ]\n", + "inputs": [ + { + "description": "Int8 Tensor of data to extract slices from.", + "name": "data" + }, + { + "description": "1D tensor: start-indices for each dimension of data.", + "name": "starts" + }, + { + "description": "1D tensor: end-indices for each dimension of data.", + "name": "ends" + } + ], + "outputs": [ + { + "description": "Sliced Int8 data tensor.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseSortedSegmentSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceFrontSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ViterbiPath", + "schema": { + "description": "\nGiven a predictions matrix and a transitions matrix, get the path with the best\nscore\n", + "inputs": [ + { + "description": "N*D predictions matrix", + "name": "predictions" + }, + { + "description": "D*D transitions matrix", + "name": "transitions" + } + ], + "outputs": [ + { + "description": "N*1 vector holds the best path indices", + "name": "viterbi_path" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceMax", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce.", + "name": "axes", + "option": "optional" + }, + { + "description": "Keep the reduced dimension(s) or not, default True keeps the reduced dimension(s).", + "name": "keepdims", + "option": "optional" + } + ], + "description": "\n Computes the max of the input tensor's element along the provided axes.\n The resulted tensor has the same rank as the input if keepdims equal True.\n If keepdims equal false, then the resulted tensor have the reduced dimension\n pruned.\n", + 
"inputs": [ + { + "description": "An input tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedMean8BitsRowwise", + "schema": { + "description": "\nVariation of SparseLengthsWeightedMean operator, where\nDATA is stored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator). To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the length of INDICES", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Slice", + "schema": { + "attributes": [ + { + "description": "(*Tuple(int)*): list of starting indices", + "name": "starts", + "option": "optional" + }, + { + "description": "(*Tuple(int)*): list of ending indices", + "name": "ends", + "option": "optional" + } + ], + "category": "Tensor", + "description": "\nProduces a slice of the input tensor.\n\n- Currently, only slicing in a single dimension is supported.\n\n- Start and end indices are either passed as two 1D input tensors or using the `starts` and `ends` arguments.\n\n- If a negative value is passed for any of the start or end indices, 
it represents the number of elements before the end of that dimension. End indices are non-inclusive unless negative (end index -1 means up to and including the last element).\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/slice_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Slice\",\n [\"X\"],\n [\"Y\"],\n starts=(0,1),\n ends=(-1,3)\n)\n\nworkspace.FeedBlob(\"X\", np.array([[1,2,3,4],[5,6,7,8]]))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[1 2 3 4]\n [5 6 7 8]]\nY:\n[[2 3]\n [6 7]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): tensor to extract slices from", + "name": "X" + }, + { + "description": "(*Tensor``*): 1D tensor of start-indices for each dimension of data", + "name": "starts" + }, + { + "description": "(*Tensor``*): 1D tensor of end-indices for each dimension of data", + "name": "ends" + } + ], + "outputs": [ + { + "description": "(*Tensor*): sliced output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseUnsortedSegmentMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "InstanceNormGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "UpsampleBilinearGradient", + "schema": { + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "SortAndShuffle", + "schema": { + "description": "\nCompute the sorted indices given a field index to sort by and break the sorted\nindices into chunks of shuffle_size * batch_size and shuffle each chunk,\nfinally we shuffle between batches. If sort_by_field_idx is -1 we skip sort.\n\nFor example, we have data sorted as\n1,2,3,4,5,6,7,8,9,10,11,12\n\nand batchSize = 2 and shuffleSize = 3, when we shuffle we get:\n[3,1,4,6,5,2] [12,10,11,8,9,7]\n\nAfter this we will shuffle among different batches with size 2\n[3,1],[4,6],[5,2],[12,10],[11,8],[9,7]\n\nWe may end up with something like\n[9,7],[5,2],[12,10],[4,6],[3,1],[11,8]\n\nInput(0) is a blob pointing to a TreeCursor, and\n[Input(1),... 
Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nSortAndShuffle is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing sorted indices.", + "name": "indices" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8MaxPoolRelu", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "MaxPool \nconsumes an input blob X and applies max pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Max pooling consisting of taking the maximum value of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. 
Output will go through rectified linearfunction, where y = max(0, x).", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BitwiseOr", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise bitwise operation `bitwise_or` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor. 
Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8SumRelu", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "MaxPool2DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Percentile", + "schema": { + "description": "\n This operator is used to find percentile representations for raw values, given a sample\n set of raw values, labeled with their corresponding percentiles from the same distribution.\n In particular, this operator takes as input a tensor of floats to find the percentile values\n for, a 2D tensor of floats, where the first column of the tensor represents sampled values,\n and the second column represents the percentile labels, and a tensor of integers lengths.\n\n This lengths tensor is used because the operator works on multiple sets of raw values at the same time. For\n example, for an input:\n original_values=[[3, 5, 3],[5, 1, 6]], lengths = [2, 1, 1], value_to_pct = [[3, 0.2], [5, 0.5], [1, 0.3], [3. 0.6]]\n\n Our operator expects that each column i of the input tensor is sampled from distribution i. Lengths tells\n us that the first two elements in value_to_pct are sampled from distribution 1, the next is from distribution two,\n and the last is from distribution 3. We expect the output of our operator to give us [[0.2, 1.0, 0.6], [0.5, 0.3, 1.0]].\n\n To calculate the percentile of an element, we check to see if its value is already mapped to\n a percentile in value_to_pct. If so, we return that value. If not, we linearly interpolate between\n the two closest values in value_to_pct. If the value is larger than all values in value_to_pct, we\n return 1. 
If it's smaller than all the values, we return 0.\n\n", + "inputs": [ + { + "description": "Input 2D tensor of floats, representing the original, raw data to calculate percentiles for.", + "name": "original_values" + }, + { + "description": "Sorted 2D tensor, with 2 columns. Each element in the first column is a float representing the raw value of a sample. Its corresponding element in the next column represents the percentile it maps to.", + "name": "value_to_pct" + }, + { + "description": "1D tensor, representing the length of each distribution. We expect that the sum of elements of this tensor is equal to the total length of value_to_pct.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1D tensor of floats, with the same dimensions as the flattened input tensor. Each element of this tensor, percentile_values[i], corresponds to the percentile calculated for original_values[i].", + "name": "percentile_values" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeSingleListFeatureTensors", + "schema": { + "attributes": [ + { + "description": "feature ids", + "name": "feature_ids", + "option": "optional" + } + ], + "description": "Merge given single-feature tensors with list features into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + 
"description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".values", + "name": "in1_values" + }, + { + "description": ".presence", + "name": "in1_presence" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values.lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.values", + "name": "out_values_values" + } + ], + "support_level": "default" + } + }, + { + "name": "RowwiseMaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "rnn_internal_apply_link", + "schema": { + "description": "\nInternal RNN operator.\n", + "support_level": "default" + } + }, + { + "name": "MergeSingleScalarFeatureTensors", + "schema": { + "attributes": [ + { + "description": "feature ids", + "name": "feature_ids", + "option": "optional" + } + ], + "description": "Merge given single-feature tensors with scalar features into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": "", + "name": "in1" + }, + { + "description": ".presence", + "name": "in1_presence" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": 
"out_keys" + }, + { + "description": ".values", + "name": "out_values" + } + ], + "support_level": "default" + } + }, + { + "name": "BRGNCHWCToPackedInt8BGRAStylizerDeprocess", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "TrimDataset", + "schema": { + "attributes": [ + { + "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "description": "\nTrim the given dataset inplace, given the dataset blobs and the field specs.\nTrimming happens such that the dataset will contain the largest possible number\nof records that is a multiple of the 'multiple_of' argument.\n", + "support_level": "default" + } + }, + { + "name": "PiecewiseLinearTransform", + "schema": { + "attributes": [ + { + "description": "1-D vector of size (prediction_dimensions x (pieces+1)) contain the upper bounds of each piece of linear function. One special case is the first bound is the lower bound of whole piecewise function and we treat it the same as the left most functions. (bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "bounds", + "option": "optional" + }, + { + "description": "1-D vector of size (prediction_dimensions x pieces) containing the slopes of linear function", + "name": "slopes", + "option": "optional" + }, + { + "description": "1-D vector of size (prediction_dimensions x pieces) containing the intercepts of linear function", + "name": "intercepts", + "option": "optional" + }, + { + "description": "If set true, we assume the input is a Nx1 or Nx2 tensor. If it is Nx1 tensor, it is positive predictions. If the input is Nx2 tensor, its first column is negative predictions and second column is positive and negative + positive = 1. 
We just need one group of piecewise linear functions for the positive predictions.", + "name": "binary", + "option": "optional" + } + ], + "description": "\nPiecewiseLinearTransform takes inputs -- predictions, a 2-D or 1-D tensor\n(Tensor) of size (batch_size x prediction_dimensions). The piecewise\nlinear functions are stored in bounds, slopes and intercepts. The output tensor\nhas the same shape of input `predictions` and contains the predictions\ntransformed by the piecewise linear functions. Each column of predictions has\nits own piecewise linear transformation functions. Therefore the size of\npiecewise function parameters are pieces x prediction_dimensions, except for\nbinary predictions where only the positive prediction needs them. Note that in\neach piece, low bound is excluded while high bound is included. Also the\npiecewise linear function must be continuous.\n\nNotes\n- If the input is binary predictions (Nx2 or Nx1 tensor), set the binary arg\nto true so that one group of piecewise linear functions is needed (see\ndetails below).\n- The transform parameters (bounds, slopes, intercepts) can be passed either\nthrough args or through input blobs.\n- If we have multiple groups of piecewise linear functions, each group has the\nsame number of pieces.\n- If a prediction is out of the bounds, it is capped to the smallest or largest\nbound.\n", + "inputs": [ + { + "description": "2-D tensor (Tensor) of size (num_batches x num_classes) containing scores", + "name": "predictions" + }, + { + "description": "See bounds in Arg. (bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "bounds (optional)" + }, + { + "description": "See slopes in Arg. (bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "slopes (optional)" + }, + { + "description": "See intercepts in Arg. 
(bounds, slopes, intercepts) can be passed through either arg or input blobs.", + "name": "intercepts (optional)" + } + ], + "outputs": [ + { + "description": "2-D tensor (Tensor) of size (num_batches x num_classes) containing transformed predictions", + "name": "transforms" + } + ], + "support_level": "default" + } + }, + { + "name": "CosineSimilarity", + "schema": { + "description": "\nThis op takes two input float tensors of the same size, $X$ and $Y$, and produces one output float tensor , $Z$, calculated as the cosine similarity between $X$ and $Y$. Recall, the cosine similarity between two tensors $X$ and $Y$ is defined as:\n\n$$\\mathbf{Z}=CosineSimilarity(\\mathbf{X},\\mathbf{Y}) = \\frac{\\mathbf{X}\\cdot\\mathbf{Y}}{\\|\\mathbf{X}\\|\\|\\mathbf{Y}\\|} = \\frac{\\sum_n^{i=1}X_iY_i}{\\sqrt{\\sum_n^{i=1}X_i^2}\\sqrt{\\sum_n^{i=1}Y_i^2}}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"CosineSimilarity\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\n// Create X\nX = np.random.randn(3, 3)\nprint(\"X:\\n\",X)\n\n// Create Y\nY = np.random.randn(3, 3)\nprint(\"Y:\\n\",Y)\n\n// Feed X & Y into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"Y\", Y.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.42635564 -0.23831588 -0.25515547]\n [ 1.43914719 -1.05613228 1.01717373]\n [ 0.06883105 0.33386519 -1.46648334]]\nY:\n [[-0.90648691 -0.14241514 -1.1070837 ]\n [ 0.92152729 -0.28115511 -0.17756722]\n [-0.88394254 1.34654037 -0.80080998]]\nZ:\n [-1.7849885e-23 1.7849885e-23 -1.0842022e-07]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1D or 2D input tensor", + "name": "X" + }, + { + "description": "1D or 2D input tensor (must have the same shape as X)", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + } + }, + { + "name": "ClipTensorByScaling", + "schema": { + "attributes": [ + { + "description": "Threshold to determine whether to scale down the tensor", + "name": "threshold", + "option": "optional" + } + ], + "description": "\n Clips the input tensor by scaling based on the input value and the threshold.\n The value is usually the (pre-computed) norm of the tensor. If the value is\n larger than the threshold, scaling would be performed in this way:\n\n tensor *= (threshold / value).\n\n An optional input called additional_threshold can be provided which\n will scale the original threshold before it is used. That is,\n the final threshold will become threshold * additional_threshold.\n This op could be used for gradient clipping.\n", + "inputs": [ + { + "description": "Tensor of floats to be clipped.", + "name": "input_tensor" + }, + { + "description": "Value to be compared against the threshold", + "name": "val" + }, + { + "description": "An optional additional threshold to scale the original threshold", + "name": "additional_threshold" + } + ], + "outputs": [ + { + "description": "Tensor of floats, which is the same size as the input tensor, representing the clipped tensor.", + "name": "clipped" + } + ], + "support_level": "default" + } + }, + { + "name": "InstanceNorm", + "schema": { + "attributes": [ + { + "default": 1e-05, + "description": "The epsilon value to use to avoid division by zero.", + "name": "epsilon", + "option": "optional", + "type": "float32" + }, + { + "default": "NCHW", + "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. 
The only other valid option is \"NHWC\".", + "name": "order", + "option": "optional", + "type": "string" + } + ], + "description": "\nThe *InstanceNorm* op applies Instance Normalization over a 4D input as described in [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022).\n\n$$output = \\frac{input-\\mu_{input}}{\\sqrt{\\sigma_{input}^2} + \\epsilon}*scale + bias$$\n\nNotice, two of the outputs are optional so there are three output cases for this op. Case 1: output; Case 2: output, saved_mean; Case 3: output, saved_mean, saved_inv_stdev.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/instance_norm_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/instance_norm_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"InstanceNorm\",\n [\"input\", \"scale\", \"bias\"],\n [\"output\"],\n epsilon=1e-5,\n)\n\nworkspace.FeedBlob(\"input\", np.random.randn(2, 1, 3, 3).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"), \"\\n\")\n\nworkspace.FeedBlob(\"scale\", np.array([1.5]).astype(np.float32))\nprint(\"scale: \", workspace.FetchBlob(\"scale\"))\n\nworkspace.FeedBlob(\"bias\", np.array([1.]).astype(np.float32))\nprint(\"bias: \", workspace.FetchBlob(\"bias\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output:\\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [[[[ 0.97856593 -1.1832817 -0.2540021 ]\n [-1.3315694 -0.7485018 0.3787225 ]\n [-0.6826597 -1.4637762 0.57116514]]]\n\n\n [[[-0.44948956 0.85544354 -0.9315333 ]\n [-0.37202677 -0.22266895 -0.27194235]\n [ 0.4948163 -0.7296504 1.3393803 ]]]]\n\nscale: [1.5]\nbias: [1.]\noutput:\n [[[[ 3.5017493 -0.3791256 1.2890853 ]\n [-0.6453266 0.40137637 2.4249308 ]\n [ 0.5195738 -0.8826599 2.7703972 ]]]\n\n\n [[[ 0.12639964 2.856744 -0.8821926 ]\n [ 0.28847694 0.60098207 0.49788612]\n [ 2.1021945 -0.45978796 3.869297 ]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "The input 4-dimensional NCHW tensor to be operated on.", + "name": "input" + }, + { + "description": "The input 1-dimensional scale tensor of size *C*.", + "name": "scale" + }, + { + "description": "The input 1-dimensional bias tensor of size *C*.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "The output 4-dimensional tensor of the same shape as input.", + "name": "output" + }, + { + "description": "(Optional) Saved mean used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_mean" + }, + { + "description": "(Optional) Saved inverse stdev used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_inv_stdev" + } + ], + "support_level": "default" + } + }, + { + "name": "RoIAlignRotated", + "schema": { + "attributes": [ + { + "description": "(float) default 1.0; Spatial scale of the input feature map X relative to the input image. E.g., 0.0625 if X has a stride of 16 w.r.t. the input image.", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's height.", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's width.", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "(int) default -1; number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. 
If <= 0, then an adaptive number of grid points are used (computed as ceil(roi_width / pooled_w), and likewise for height).", + "name": "sampling_ratio", + "option": "optional" + } + ], + "description": "\nSimilar to RoIAlign but can handle rotated region proposals.\nBased on https://arxiv.org/abs/1703.01086.\n", + "inputs": [ + { + "description": "4D feature map input of shape (N, C, H, W).", + "name": "X" + }, + { + "description": "2D input of shape (R, 5 or 6) specifying R RoIs representing: batch index in [0, N - 1], center_x, center_y, width, height, angle. The RoI coordinates are in the coordinate system of the input image. `angle` should be specified in degrees and represents the RoI rotated counter-clockwise. For inputs corresponding to a single image, batch index can be excluded to have just 5 columns.", + "name": "RoIs" + } + ], + "outputs": [ + { + "description": "4D output of shape (R, C, pooled_h, pooled_w). The r-th batch element is a pooled feature map cooresponding to the r-th RoI.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "StringJoin", + "schema": { + "attributes": [ + { + "description": "Delimiter for join (Default: \",\").", + "name": "delimiter", + "option": "optional" + }, + { + "description": "Axis for the join (either 0 or 1)", + "name": "axis", + "option": "optional" + } + ], + "description": "\nTakes a 1-D or a 2-D tensor as input and joins elements in each row with the\nprovided delimiter. Output is a 1-D tensor of size equal to the first dimension\nof the input. Each element in the output tensor is a string of concatenated\nelements corresponding to each row in the input tensor. 
For 1-D input, each\nelement is treated as a row.\n", + "inputs": [ + { + "description": "1-D or 2-D tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "1-D tensor of strings created by joining row elements from the input tensor.", + "name": "strings" + } + ], + "support_level": "default" + } + }, + { + "name": "ConvGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "GatherFused8BitRowwise", + "schema": { + "description": "\nPerform the same operation as Gather, but operating on 8-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\nthe scale and offset).\nDATA needs to have rank 2 and INDICES needs to have rank 1.\n", + "inputs": [ + { + "description": "uint8 tensor with rank 2 obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA forthe rows that are being gathered", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "output", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Lars", + "schema": { + "attributes": [ + { + "description": "rescaling offset parameter", + "name": "offset", + "option": "optional" + }, + { + "description": "minimum learning rate for clipping", + "name": "lr_min", + "option": "optional" + } + ], + "description": "\nImplement Layer-wise Adaptive Rate Scaling (LARS) with clipping. 
Before adding weight\ndecay, given a parameter tensor X and its gradient dX, the local learning rate\nfor X will be\n\nlocal_lr = trust * norm(X) / ( norm(dX) + wd * norm(X) + offset * norm(X) )\n\n = trust / ( norm(dX) / norm(X) + wd + offset ),\n\nwhere offset is a preset hyper-parameter to avoid numerical issue and trust\nindicates how much we trust the layer to change its parameters during one update.\nIn this implementation, we uses l2 norm and the computed local learning rate is\nclipped based on the upper bound lr_max and the lower bound lr_min:\n\nlocal_lr = min(local_lr, lr_max) and local_lr = max(local_lr, lr_min)\n\n", + "inputs": [ + { + "description": "Parameter tensor", + "name": "X" + }, + { + "description": "Gradient tensor", + "name": "dX" + }, + { + "description": "Weight decay", + "name": "wd" + }, + { + "description": "Trust", + "name": "trust" + }, + { + "description": "Upper bound of learning rate", + "name": "lr_max" + } + ], + "outputs": [ + { + "description": "Rescaled local learning rate", + "name": "lr_rescaled" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeMultiMapFeatureTensorsGradient", + "schema": { + "description": "Explode given multi-feature tensors with map features into many.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": 
".lengths", + "name": "in1_lengths" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.values_grad", + "name": "out_values_values_grad" + } + ], + "outputs": [ + { + "description": ".values.values_grad", + "name": "in1_values_values_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsIndicesInGradientMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Tan", + "schema": { + "description": "\nCalculates the tangent of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The tangent of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "BooleanMaskLengths", + "schema": { + "description": "\nGiven a tensor of int32 `lengths` tensor representing segment lengths and a `mask` (boolean) tensor, return the segment lengths of the corresponding segmented tensor after **BooleanMask** is applied.\n\nIf `lengths` tensor is $[a_1, a_2, ..., a_n]$, then length of `mask` tensor must be $a_1 + a_2 + ... + a_n$.\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_mask_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanMaskLengths\",\n [\"lengths\", \"mask\"],\n [\"masked_lengths\"]\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([1,3,2], dtype=np.int32))\nworkspace.FeedBlob(\"mask\", np.array([False,True,True,False,True,True]))\nprint(\"lengths:\", workspace.FetchBlob(\"lengths\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\nworkspace.RunOperatorOnce(op)\nprint(\"masked_lengths:\", workspace.FetchBlob(\"masked_lengths\"))\n\n```\n\n**Result**\n\n```\n\nlengths: [1 3 2]\nmask: [False True True False True True]\nmasked_lengths: [0 2 2]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor containing segment lengths", + "name": "lengths" + }, + { + "description": "(*Tensor``*): A 1D bool tensor of values to keep.", + "name": "mask" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): 1D tensor of same type as inputs that contains the sequence", + "name": "masked_lengths" + } + ], + "support_level": "default" + } + }, + { + "name": "Reciprocal", + "schema": { + "description": "\nPerforms element-wise reciprocal ($\\1/x$) of input tensor $X$.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reciprocal_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Reciprocal\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[8. 3. 3.]\n [4. 0. 0.]\n [1. 2. 5.]]\nY:\n[[0.125 0.3333333 0.3333333 ]\n [0.25 inf inf ]\n [1 0.5 0.2 ]]\n\n```\n\n
\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SquaredL2Distance", + "schema": { + "description": "\nGiven two input float tensors X, Y, and produces one output float tensor\nof the L2 difference between X and Y that is computed as ||(X - Y)^2 / 2||.\n", + "inputs": [ + { + "description": "1D or 2D input tensor", + "name": "X" + }, + { + "description": "1D or 2D input tensor (must have the same shape as X)", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + } + }, + { + "name": "ArgMin", + "schema": { + "attributes": [ + { + "default": -1, + "description": "The axis to get argmin.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "default": true, + "description": "If True (default), the output tensor shape will match the input tensor shape except the `axis` dimension equals 1. Else, the `axis` dimension of the output tensor is removed.", + "name": "keepdims", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nRetrieve the argmin of an axis dimension specified by the `axis`\nargument. Given an input tensor and two arguments (`axis` and\n`keepdims`), returns a tensor containing the indices of the smallest\nelement along the given axis. If the `keepdims` arg is *True* (default),\nthe shape of the output tensor matches the input tensor except the\n`axis` dimension equals 1. Else, the `axis` dimension of the output\ntensor is removed.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/arg_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ArgMin\",\n [\"X\"],\n [\"Indices\"],\n axis=1\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(5,5))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\n\n```\n\n**Result**\n\n```\n\nX: [[9. 4. 6. 4. 1.]\n [5. 9. 8. 3. 4.]\n [6. 1. 0. 2. 9.]\n [7. 8. 2. 4. 9.]\n [3. 9. 4. 9. 4.]]\nIndices: [[4]\n [3]\n [2]\n [2]\n [0]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Tensor of indices for the smallest values.", + "name": "Indices" + } + ], + "support_level": "default" + } + }, + { + "name": "RecurrentNetwork", + "schema": { + "description": "\nRun the input network in a recurrent fashion. This can be used to\nimplement fairly general recurrent neural networks (RNNs).\n\nThe operator proceeds as follows.\n\n- First, initialized the states from the input recurrent states\n- For each timestep T, apply the links (that map offsets from input/output\ntensors into the inputs/outputs for the `step` network)\n- Finally, alias the recurrent states to the specified output blobs.\n\nThis is a fairly special-case meta-operator, and so the implementation\nis somewhat complex. It trades of generality (and frankly usability)\nagainst performance and control (compared to e.g. TF\ndynamic_rnn, Theano scan, etc).\n\nSee the usage examples for a flavor of how to use it.\n", + "support_level": "default" + } + }, + { + "name": "Broadcast", + "schema": { + "attributes": [ + { + "description": "(int, default 0) the root to run broadcast from.", + "name": "root", + "option": "optional" + } + ], + "description": "\nDoes a broadcast operation from the root node to every other node. 
The tensor\non each node should have been pre-created with the same shape and data type.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be broadcasted.", + "name": "X" + } + ], + "outputs": [ + { + "description": "In-place as input 1.", + "name": "X" + } + ], + "support_level": "default" + } + }, + { + "name": "PythonDLPackGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SpaceToBatch", + "schema": { + "attributes": [ + { + "description": "(*int*): exclusive axis that divides the first and second dimension of matrix `A` (default=0)", + "name": "pad", + "option": "optional" + }, + { + "description": "(*int*): height/width of spatial blocks to be moved (default=2)", + "name": "block_size", + "option": "optional" + }, + { + "description": "(*string*): order of dimensions of input and output blobs; only \"NCHW\" order is currently supported (default=\"NCHW\")", + "name": "order", + "option": "optional" + } + ], + "description": "\nZero-pads and then rearranges (permutes) blocks of spatial data into batch. More specifically, this op outputs a copy of the input tensor where values from the height and width dimensions are moved to the batch dimension. After the zero-padding is according to the `pad` argument, both height and width of the input must be divisible by the `block_size`. Only \"NCHW\" order is currently supported.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/space_batch_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SpaceToBatch\",\n [\"X\"],\n [\"Y\"],\n pad=2,\n block_size=3\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(1,3,5,5).astype(np.float32))\nprint(\"X.shape:\", workspace.FetchBlob(\"X\").shape)\nworkspace.RunOperatorOnce(op)\nprint(\"Y.shape:\", workspace.FetchBlob(\"Y\").shape)\n\n```\n\n**Result**\n\n```\n\nX.shape: (1, 3, 5, 5)\nY.shape: (9, 3, 3, 3)\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor (NCHW order)", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor (NCHW order)", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchToSpace", + "schema": { + "attributes": [ + { + "description": "(*int*): exclusive axis that divides the first and second dimension of matrix `A` (default=0)", + "name": "pad", + "option": "optional" + }, + { + "description": "(*int*): height/width of spatial blocks to be moved (default=2)", + "name": "block_size", + "option": "optional" + }, + { + "description": "(*string*): order of dimensions of input and output blobs; only \"NCHW\" order is currently supported (default=\"NCHW\")", + "name": "order", + "option": "optional" + } + ], + "description": "\nRearranges (permutes) data from batch into blocks of spatial data, followed by cropping. This is the reverse transformation of `SpaceToBatch`. More specifically, this op outputs a copy of the input tensor where values from the batch dimension are moved in spatial blocks to the height and width dimensions, followed by cropping along the height and width dimensions. Only \"NCHW\" order is currently supported.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/space_batch_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BatchToSpace\",\n [\"X\"],\n [\"Y\"],\n pad=3\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(10,3,32,32).astype(np.float32))\nprint(\"X.shape:\", workspace.FetchBlob(\"X\").shape)\nworkspace.RunOperatorOnce(op)\nprint(\"Y.shape:\", workspace.FetchBlob(\"Y\").shape)\n\n```\n\n**Result**\n\n```\n\nX.shape: (10, 3, 32, 32)\nY.shape: (2, 3, 58, 58)\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor (NCHW order)", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor (NCHW order)", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Erf", + "schema": { + "description": "\nCalculates the arcsine of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arcsine of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "AtomicAppend", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "GivenTensorBoolFill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. 
First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "ConvRelu", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "StumpFuncIndex", + "schema": { + "description": "\nSplit the elements and return the indices based on the given threshold.\n", + "inputs": [ + { + "description": "tensor of float", + "name": "X" + } + ], + "outputs": [ + { + "description": "tensor of int64 indices for elements below/equal threshold", + "name": "Index_Low" + }, + { + "description": "tensor of int64 indices for elements above threshold", + "name": "Index_High" + } + ], + "support_level": "default" + } + }, + { + "name": "TopK", + "schema": { + "description": "\nRetrieve the top-K elements of the last dimension. \nGiven an input tensor of shape $(a_1, a_2, ..., a_n, r)$. `k` can be passed as an integer argument or a 1D tensor containing a single integer.\nReturns up to three outputs:\n\n1. Value tensor of shape $(a_1, a_2, ..., a_n, k)$ which contains the values of the top k elements along the last dimension\n2. Index tensor of shape $(a_1, a_2, ..., a_n, k)$ which contains the indices of the top k elements (original indices from the input tensor).\n3. [OPTIONAL] Flattened index tensor of shape $(a_1 * a_2 * ... * a_n * k,)$.\n\nGiven two equivalent values, this operator uses the indices along the last dimension as a tiebreaker. That is, the element with the lower index will appear first.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/top_k.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"TopK\",\n [\"X\"],\n [\"Values\", \"Indices\", \"Flattened_indices\"],\n k=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(3,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Values:\", workspace.FetchBlob(\"Values\"))\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\nprint(\"Flattened_indices:\", workspace.FetchBlob(\"Flattened_indices\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[6. 7. 0.]\n [8. 7. 7.]\n [1. 5. 6.]]\n\n [[0. 6. 1.]\n [2. 8. 4.]\n [1. 2. 9.]]\n\n [[4. 3. 7.]\n [0. 1. 7.]\n [0. 1. 8.]]]\nValues:\n[[[7. 6.]\n [8. 7.]\n [6. 5.]]\n\n [[6. 1.]\n [8. 4.]\n [9. 2.]]\n\n [[7. 4.]\n [7. 1.]\n [8. 1.]]]\nIndices:\n[[[1 0]\n [0 1]\n [2 1]]\n\n [[1 2]\n [1 2]\n [2 1]]\n\n [[2 0]\n [2 1]\n [2 1]]]\nFlattened_indices: [ 1 0 3 4 8 7 10 11 13 14 17 16 20 18 23 22 26 25]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): input tensor of shape $(a_1, a_2, ..., a_n, r)$", + "name": "X" + }, + { + "description": "(*int*): number of top elements to retrieve", + "name": "k" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): output tensor of shape $(a_1, a_2, ..., a_n, k)$", + "name": "Values" + }, + { + "description": "(*Tensor``*): tensor of indices of shape $(a_1, a_2, ..., a_n, k)$; indices values refer to each element's index in the last dimension of the `X` input tensor", + "name": "Indices" + }, + { + "description": "(*Tensor``*): tensor of indices of shape $(a_1 * a_2 * ... * a_n * k,)$; indices values refer to each element's index in the flattened input tensor `X`", + "name": "Flattened_indices" + } + ], + "support_level": "default" + } + }, + { + "name": "SpatialBNGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ThrowChildThreadException", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CheckDatasetConsistency", + "schema": { + "attributes": [ + { + "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "description": "\nChecks that the given data fields represents a consistent dataset under\nthe schema specified by the `fields` argument. Operator fails if the fields\nare not consistent. 
If data is consistent, each field's data can be safely\nappended to an existing dataset, keeping it consistent.\n", + "inputs": [ + { + "description": "Data for field 0.", + "name": "field_0" + } + ], + "support_level": "default" + } + }, + { + "name": "RoIPoolGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CreateTextFileReader", + "schema": { + "attributes": [ + { + "description": "Path to the file.", + "name": "filename", + "option": "optional" + }, + { + "description": "Number of passes over the file.", + "name": "num_passes", + "option": "optional" + }, + { + "description": "List with type of each field. Type enum is found at core.DataType.", + "name": "field_types", + "option": "optional" + } + ], + "description": "Create a text file reader. Fields are delimited by .", + "outputs": [ + { + "description": "Pointer to the created TextFileReaderInstance.", + "name": "handler" + } + ], + "support_level": "default" + } + }, + { + "name": "StringSuffix", + "schema": { + "attributes": [ + { + "description": "Maximum size of the suffix, in bytes.", + "name": "length", + "option": "optional" + } + ], + "description": "\nComputes the element-wise string suffix of the string tensor.\nInput strings that are shorter than suffix length will be returned unchanged.\nNOTE: Prefix is computed on number of bytes, which may lead to wrong behavior\nand potentially invalid strings for variable-length encodings such as utf-8.\n", + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of std::string containing suffixes for each output.", + "name": "suffixes" + } + ], + "support_level": "default" + } + }, + { + "name": "Expand", + "schema": { + "description": "\n Broadcast the input tensor to a materialized new tensor using given shape.\n Broadcast rule is similar to \"numpy.array(input) * numpy.ones(shape)\":\n Dimensions are right alignment;\n Two 
corresponding dimensions must have the same value, or one of them\n equals to 1.\n In order to align with PyTorch's `expand`, `shape` is allowed to have entries\n equal to -1, which means to preserve the size of the corresponding dimension\n in `X` (so it's actually equivalent to equal to 1).\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): expand shape", + "name": "shape" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): expanded tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Gelu", + "schema": { + "attributes": [ + { + "description": "If true, use y = 0.5x * (1 + tanh(sqrt(2/Pi) * (x + 0.044715x^3))).", + "name": "fast_gelu", + "option": "optional" + } + ], + "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = xP(X <= x) where X ~ N(0, 1),\nis applied to the tensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LpNorm", + "schema": { + "attributes": [ + { + "default": 2, + "description": "Order of the norm in p-norm.", + "name": "p", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "Whether we calculate norm or averaged_norm.The Lp_averaged_norm(x) is defined as Lp_averaged_norm(x) = LpNorm(x) / size(x)", + "name": "average", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nThis op computes the $L_p$ norm of the one dimensional input tensor $X$, and outputs a one dimensional output tensor $Y$. Here, the $L_p$ norm is calculated as\n\n$$L_p(\\mathbf{x}) = \\sum_i x_i^p$$\n\nThis op supports $p$ values of 1 or 2. 
If the average argument is set, the norm is calculated as Lp_averaged_norm(x) is defined as Lp_averaged_norm(x) = LpNorm(x) / size(x).\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lpnorm_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lpnorm_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LpNorm\",\n [\"X\"],\n [\"Y\"],\n p=2\n)\nX = np.array([5., 2.])\nprint(\"X:\\n\",X)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [5. 2.]\nY:\n [29.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1D Input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSumFused8BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsWeightedSum,\nbut operating on 8-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 4-byte scale and 4-byte bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseSortedSegmentMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Int8Add", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\n Performs element-wise binary Add (with no broadcast support).\n", + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "Second operand. 
It should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "AtanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "GetCursorOffset", + "schema": { + "description": "Get the current offset in the cursor.", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + } + ], + "outputs": [ + { + "description": "Tensor containing the offsets for the cursor.", + "name": "offsets" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8LeakyRelu", + "schema": { + "attributes": [ + { + "description": "Coefficient of leakage, default value is 0.01", + "name": "alpha", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\nLeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "TanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceL1Gradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MergeMultiListFeatureTensorsGradient", + "schema": { + "description": "Explode given multi-feature tensors with list features into many.\n 
Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.values_grad", + "name": "out_values_values_grad" + } + ], + "outputs": [ + { + "description": ".values.values_grad", + "name": "in1_values_values_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8GivenIntTensorFill", + "schema": { + "attributes": [ + { + "description": "Input array of type int32", + "name": "values", + "option": "optional" + }, + { + "description": "Input tensor shape", + "name": "shape", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\n Creates quantized tensor of type int32 with scale and zero point info.\n", + "outputs": [ + { + "description": "An Int8TensorCPU with scale and zero point info", + "name": "Tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "TensorProtosDBInput", + "schema": { + "attributes": [ + { + "description": "(int, default 0) the number of samples in a batch. 
The default value of 0 means that the operator will attempt to insert the entire data in a single output blob.", + "name": "batch_size", + "option": "optional" + } + ], + "description": "\nTensorProtosDBInput is a simple input operator that basically reads things\nfrom a db where each key-value pair stores an index as key, and a TensorProtos\nobject as value. These TensorProtos objects should have the same size, and they\nwill be grouped into batches of the given size. The DB Reader is provided as\ninput to the operator and it returns as many output tensors as the size of the\nTensorProtos object. Each output will simply be a tensor containing a batch of\ndata with size specified by the 'batch_size' argument containing data from the\ncorresponding index in the TensorProtos objects in the DB.\n", + "inputs": [ + { + "description": "A pre-initialized DB reader. Typically, this is obtained by calling CreateDB operator with a db_name and a db_type. The resulting output blob is a DB Reader tensor", + "name": "data" + } + ], + "outputs": [ + { + "description": "The output tensor in which the batches of data are returned. The number of output tensors is equal to the size of (number of TensorProto's in) the TensorProtos objects stored in the DB as values. Each output tensor will be of size specified by the 'batch_size' argument of the operator", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "RemovePadding", + "schema": { + "attributes": [ + { + "description": "Outer-size of padding to remove around each range.", + "name": "padding_width", + "option": "optional", + "type": "int64" + }, + { + "description": "[OPTIONAL] Specifies a different end-padding width. If this is not set, will use same as `padding_width`.", + "name": "end_padding_width", + "option": "optional", + "type": "int64" + } + ], + "description": "\nRemove padding around the edges of each segment of the input data. 
This is the\nreverse operation of **AddPadding**, and uses the same arguments and conventions\nfor input and output data format.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sequence_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\naddpad_op = core.CreateOperator(\n \"AddPadding\",\n [\"X\", \"lengths_add\"],\n [\"Y\", \"lengths_out_add\"],\n padding_width=1\n)\n\nrmpad_op = core.CreateOperator(\n \"RemovePadding\",\n [\"Y\", \"lengths_rm\"],\n [\"Z\", \"lengths_out_rm\"],\n padding_width=1\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(20, size=(3,5))))\nworkspace.FeedBlob(\"lengths_add\", np.array([3]).astype(np.int32))\nworkspace.FeedBlob(\"lengths_rm\", np.array([5]).astype(np.int32))\n\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(addpad_op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"lengths_out_add:\", workspace.FetchBlob(\"lengths_out_add\"))\n\nworkspace.RunOperatorOnce(rmpad_op)\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nprint(\"lengths_out_rm:\", workspace.FetchBlob(\"lengths_out_rm\"))\n```\n\n**Result**\n\n```\nX: [[17 19 1 9 1]\n [19 3 5 19 1]\n [16 0 0 0 4]]\nY: [[ 0 0 0 0 0]\n [17 19 1 9 1]\n [19 3 5 19 1]\n [16 0 0 0 4]\n [ 0 0 0 0 0]]\nlengths_out_add: [5]\nZ: [[17 19 1 9 1]\n [19 3 5 19 1]\n [16 0 0 0 4]]\nlengths_out_rm: [3]\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor ($T$).", + "name": "data_in" + }, + { + "description": "*(type: Tensor``)* Number of elements in each range. sum(lengths) = N. If not provided, considers all data as a single segment.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Padded data tensor ($T$).", + "name": "data_out" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Lengths for each padded range.", + "name": "lengths_out" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragedLoss", + "schema": { + "description": "\nThe *AveragedLoss* op takes a single 1-D input tensor *input* and returns a single output float value *output*. The output represents the average of the values in *input*. This op is commonly used for averaging losses, hence the name, however it does not exclusively operate on losses.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/loss_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/loss_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragedLoss\",\n [\"input\"],\n [\"output\"],\n)\n\nworkspace.FeedBlob(\"input\", np.array([8, 10, 12]).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output: \\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [ 8. 10. 12.]\noutput:\n 10.0\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "The input data as Tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The output tensor of size 1 containing the averaged value.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "ReluGradient", + "schema": { + "description": "\nReluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the rectified linear function.\n", + "support_level": "default" + } + }, + { + "name": "TextFileReaderRead", + "schema": { + "attributes": [ + { + "description": "Maximum number of rows to read.", + "name": "batch_size", + "option": "optional" + } + ], + "description": "Read a batch of rows from the given text file reader instance. Expects the number of fields to be equal to the number of outputs. Each output is a 1D tensor containing the values for the given field for each row. When end of file is reached, returns empty tensors.", + "inputs": [ + { + "description": "Pointer to an existing TextFileReaderInstance.", + "name": "handler" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsTile", + "schema": { + "description": "\nGiven DATA tensor of rank r >= 1, and LENGTHS tensor of rank 1, duplicate each\nentry of the outer-most dimension of DATA according to LENGTHS, and concatenate\nthem in an output tensor of rank r.\n\nExample:\n DATA = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n [6.8, 7.9],\n ]\n LENGTHS = [0, 1, 3, 2]\n OUTPUT = [\n [2.3, 3.4],\n [4.5, 5.7],\n [4.5, 5.7],\n [4.5, 5.7],\n [6.8, 7.9],\n [6.8, 7.9],\n ]\n", + "inputs": [ + { + "description": "Tensor of rank r >= 1. 
First dimension must be equal to the size of lengths", + "name": "DATA" + }, + { + "description": "Tensor of int32 lengths of rank 1", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Tensor of rank r", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8ChannelShuffle", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "RowWiseSparseAdam", + "schema": { + "attributes": [ + { + "description": "Default 0.9", + "name": "beta1", + "option": "optional" + }, + { + "description": "Default 0.999", + "name": "beta2", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\n Computes a modified Adam Update for the sparse case.\n Given inputs (param, moment1, moment2, indices, grad, lr, iter), runs the\n Adam update on (param, moment1[indices], moment2[indices], lr, iter) and returns\n (new_param, new_moment1, new_moment2), where moment2 is a 1D tensor\n with length equal to the number of rows in param:\n shape(moment2) == shape(param)[0]. 
Each element of moment2 is\n applied to an entire row of param, and the new moment2 values are\n calculated by averaging across the row.\n\n ", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "First moment history", + "name": "moment_1" + }, + { + "description": "Second moment history", + "name": "moment_2" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "iteration number", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated first moment", + "name": "output_moment_1" + }, + { + "description": "Updated second moment", + "name": "output_moment_2" + }, + { + "description": "Optional Effective gradient", + "name": "output_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "Negative", + "schema": { + "description": "\nComputes the element-wise negative of the input.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/negative_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Negative\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3).astype(np.float32)))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[0.83296907 0.61407167 0.32562155]\n [0.59304523 0.03111175 0.29365504]\n [0.09478621 0.5424558 0.73940724]]\nY: [[-0.83296907 -0.61407167 -0.32562155]\n [-0.59304523 -0.03111175 -0.29365504]\n [-0.09478621 -0.5424558 -0.73940724]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* 1D input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* 1D output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "StdDevPut", + "schema": { + "attributes": [ + { + "description": "(*str*): name of the stat. If not present, then uses name of input blob", + "name": "name", + "option": "optional" + }, + { + "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers", + "name": "magnitude_expand", + "option": "optional" + }, + { + "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed", + "name": "bound", + "option": "optional" + }, + { + "description": "(*float*): Optionally provide a default value for receiving empty tensors", + "name": "default_value", + "option": "optional" + } + ], + "description": "\n Consume a value and pushes it to the global stat registry as an standard deviation.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): A scalar tensor, representing any numeric value", + "name": "value" + } + ], + "support_level": "default" + } + }, + { + "name": "Perplexity", + "schema": { + "description": "\nPerplexity calculates how well a probability distribution predicts a sample.\nPerplexity takes a 1-D tensor containing a batch of probabilities. Each value\nin the tensor belongs to a different sample and represents the probability of\nthe model predicting the true label for that sample. The operator returns a\nsingle (float) perplexity value for the batch.\n", + "inputs": [ + { + "description": "The input data as Tensor. 
It contains a batch oftrue label or target probabilities", + "name": "probabilities" + } + ], + "outputs": [ + { + "description": "The output- a single (float) perplexity value for the batch", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseUnsortedSegmentSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "OneHot", + "schema": { + "description": "\nThe *OneHot* op accepts two inputs *indices* and *index_size_tensor*, and produces a single output *one_hots*. For each index in *indices* the op creates a one-hot row in *one_hots* of length *index_size_tensor* where all entries are zero except the entry at the index is 1. The size of *one_hots* is *len(indices)* x *index_size_tensor*.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/one_hot_ops.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/one_hot_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"OneHot\",\n [\"indices\", \"index_size_tensor\"],\n [\"one_hots\"],\n)\n\nworkspace.FeedBlob(\"indices\", np.array([0,1,2,3,4]).astype(np.long))\nprint(\"indices:\\n\", workspace.FetchBlob(\"indices\"))\n\nworkspace.FeedBlob(\"index_size_tensor\", np.array([5]).astype(np.long))\nprint(\"index_size_tensor:\\n\", workspace.FetchBlob(\"index_size_tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"one_hots: \\n\", workspace.FetchBlob(\"one_hots\"))\n\n```\n\n**Result**\n\n```\n\nindices:\n [0 1 2 3 4]\nindex_size_tensor:\n [5]\none_hots:\n [[1. 0. 0. 0. 0.]\n [0. 1. 0. 0. 0.]\n [0. 0. 1. 0. 0.]\n [0. 0. 0. 1. 0.]\n [0. 0. 0. 0. 1.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "The active index for each example in the batch.", + "name": "indices" + }, + { + "description": "Scalar with the size of the index. Must be in CPU context", + "name": "index_size_tensor" + } + ], + "outputs": [ + { + "description": "Matrix of size len(indices) x index_size", + "name": "one_hots" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeSingleMapFeatureTensors", + "schema": { + "attributes": [ + { + "description": "feature ids", + "name": "feature_ids", + "option": "optional" + } + ], + "description": "Merge given single-feature tensors with map features into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".keys", + "name": "in1_keys" + }, + { + "description": ".values", + "name": "in1_values" + }, + { + "description": ".presence", + "name": "in1_presence" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values.lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.keys", + "name": "out_values_keys" + }, + { + "description": ".values.values", + "name": "out_values_values" + } + ], + 
"support_level": "default" + } + }, + { + "name": "Cbrt", + "schema": { + "description": null, + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor calculated as the cbrt of the input tensor, element-wise.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "TensorVectorSize", + "schema": { + "description": "Get the size of the input vector", + "inputs": [ + { + "description": "std::unique_ptr >", + "name": "tensor vector" + } + ], + "outputs": [ + { + "description": "int32_t size", + "name": "size" + } + ], + "support_level": "default" + } + }, + { + "name": "AbsGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "IncrementPut", + "schema": { + "attributes": [ + { + "description": "(*str*): name of the stat. If not present, then uses name of input blob", + "name": "name", + "option": "optional" + }, + { + "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers", + "name": "magnitude_expand", + "option": "optional" + }, + { + "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed", + "name": "bound", + "option": "optional" + }, + { + "description": "(*float*): Optionally provide a default value for receiving empty tensors", + "name": "default_value", + "option": "optional" + } + ], + "description": "\n Consume a value and pushes it to the global stat registry as an sum.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): A scalar tensor, representing any numeric value", + "name": "value" + } + ], + "support_level": "default" + } + }, + { + "name": "MSRAFill", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LC2DGradient", + "schema": { + 
"description": null, + "support_level": "default" + } + }, + { + "name": "Accumulate", + "schema": { + "attributes": [ + { + "description": "(float, default 1.0) Accumulation multiplier", + "name": "gamma", + "option": "optional" + } + ], + "description": "\nAccumulate operator accumulates the input tensor to the output tensor. If the\noutput tensor already has the right size, we add to it; otherwise, we first\ninitialize the output tensor to all zeros, and then do accumulation. Any\nfurther calls to the operator, given that no one else fiddles with the output\nin the interim, will do simple accumulations.\nAccumulation is done using Axpby operation as shown:\n Y = 1*X + gamma*Y\nwhere X is the input tensor, Y is the output tensor and gamma is the multiplier\nargument.\n", + "inputs": [ + { + "description": "The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Accumulated output tensor", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "CheckAtomicBool", + "schema": { + "description": "Copy the value of an atomic to a bool", + "inputs": [ + { + "description": "Blob containing a unique_ptr>", + "name": "atomic_bool" + } + ], + "outputs": [ + { + "description": "Copy of the value for the atomic", + "name": "value" + } + ], + "support_level": "default" + } + }, + { + "name": "StatRegistryCreate", + "schema": { + "description": "\nCreate a StatRegistry object that will contain a map of performance counters\nkeyed by name. 
A StatRegistry is used to gather and retrieve performance\ncounts throughout the caffe2 codebase.\n", + "outputs": [ + { + "description": "A Blob pointing to the newly created StatRegistry.", + "name": "handle" + } + ], + "support_level": "default" + } + }, + { + "name": "GT", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise greater than comparison **>** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GT\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False True False False False False]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "SumSqrElements", + "schema": { + "attributes": [ + { + "description": "whether to average or not", + "name": "average", + "option": "optional" + } + ], + "description": "Sums the squares elements of the input tensor.", + "inputs": [ + { + "description": "Tensor to sum up", + "name": "X" + } + ], + "outputs": [ + { + "description": "Scalar sum of squares", + "name": "sum" + } + ], + "support_level": "default" + } + }, + { + "name": "WeightedSample", + "schema": { + "description": "\nThe operator performs sampling based on the input sampling weights for\neach batch. 
All weights must be non-negative numbers.\nThe input is a 2-D tensor (Tensor) of size (batch_size x weights_dim).\nFor each batch, an index is randomly sampled from the distribution given by\nthe weights of the corresponding batch.\nThe output is a 1-D tensor (Tensor) of size (batch_size x 1) and\ncontains the index(es) of the sampled output.\n", + "inputs": [ + { + "description": "A 2-D Tensor of size (batch_size x weights_dim).All weights must be non-negative numbers.", + "name": "sampling_weights" + }, + { + "description": "An optional 2-D Tensor of size (batch_size x weights_dim).Its values correspond to the sampling weights.", + "name": "sampling_values" + } + ], + "outputs": [ + { + "description": "The output tensor contains index(es) sampled from distribution givenby the weight vector(s) in the input tensorThe output is a 1-D Tensor of size (batch_size x 1)", + "name": "sampled_indexes" + }, + { + "description": "The output tensor contains value(s) selected by the sampled index(es)It is a 1-D Tensor of size (batch_size x 1)", + "name": "sampled_values" + } + ], + "support_level": "default" + } + }, + { + "name": "WeightedMultiSampling", + "schema": { + "attributes": [ + { + "description": "number of samples to sample from the input data", + "name": "num_samples", + "option": "optional" + } + ], + "description": "\nThe operator performs sampling based on the input sampling weights.\nAll weights are cummulative probability thus sorted. The output is\na 1-D tensor (Tensor). If two inputs are given, the second input\nis used to provide shape of the output sample tensor. Otherwise, we use\nargument `num_samples` to determine the number of samples to generate.\n", + "inputs": [ + { + "description": "An optional 1-D Tensor.Input cumulative sampling probability (such as [0.2, 0.5, 0.8, 1.5]). All weights must be non-negative numbers. Note that the last value of CDF is not necessary 1. 
If the last value is not 1, all values in sampling_cdf will be scaled by this number.", + "name": "sampling_cdf" + }, + { + "description": "Tensor whose shape will be applied to output.", + "name": "shape_tensor (optional)" + } + ], + "outputs": [ + { + "description": "The output tensor contains indices sampled from distribution givenby the weight vector in the input tensorThe output is a 1-D Tensor of size determined by argument`num_samples` or the second input tensor.", + "name": "sampled_indexes" + } + ], + "support_level": "default" + } + }, + { + "name": "SwishGradient", + "schema": { + "description": "\nSwishGradient takes X, Y and dY and uses this to update dX according to the\nchain rule and derivatives of the swish function.\n", + "support_level": "default" + } + }, + { + "name": "CrossEntropyGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LT", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise less than comparison **<** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. 
B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LT\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False False True False False True]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "Softsign", + "schema": { + "description": "\n*Softsign* takes one input data tensor $X$ and produces one output data $Y,$ where the softsign function, $y = \\frac{x}{1+ |x|}$, is applied to $X$ elementwise. This operation can be done in an in-place fashion too, by providing the same input and output blobs.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softsign_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Softsign\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-1.3060539 0.7242748 -1.9907674 ]\n [-0.64802396 -0.03244735 0.7455406 ]\n [-0.298492 -0.5774271 2.8364444 ]]\n\nY:\n [[-0.5663588 0.420046 -0.6656376 ]\n [-0.39321268 -0.03142761 0.4271116 ]\n [-0.2298759 -0.36605626 0.739342 ]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob to be operated on.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output data blob with same shape as input", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToHalf", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "HardSigmoid", + "schema": { + "attributes": [ + { + "description": "float: the slope of the function. Defaults to 0.2", + "name": "alpha", + "option": "optional" + }, + { + "description": "float: the bias value of the function. Defaults to 0.5", + "name": "beta", + "option": "optional" + } + ], + "description": "\nApplies hard sigmoid operation to the input data element-wise.\nThe HardSigmoid operation takes one input $X$, produces one output $Y$, and is defined as:\n\n$$Y = max(0,min(1,x * alpha + beta))$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"HardSigmoid\",\n [\"X\"],\n [\"Y\"],\n alpha = 0.2,\n beta = 0.5,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"input:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"sigmoid:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\ninput: [ 1.5744036 0.31632107 1.7842269 1.4450722 -2.1726978 ]\nhard_sigmoid: [ 0.81488073, 0.56326419, 0.85684538, 0.78901446, 0.06546044]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D output tensor with same shape as input", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "GivenTensorStringFill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "UniformIntFill", + "schema": { + "attributes": [ + { + "description": "(*int*): minimum value, inclusive", + "name": "min", + "option": "optional" + }, + { + "description": "(*int*): maximum value, inclusive", + "name": "max", + "option": "optional" + }, + { + "description": "(*Tuple(int)*): shape of the output, do not set when `input_as_shape`=1", + "name": "shape", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to use the first input as shape; `shape` input must be in CPU context", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": "\nFill the output tensor with int32 samples from uniform distribution [`min`, `max`].\n\n- The range can be defined either by arguments or input blobs. `min` and `max` are inclusive.\n - If the range is given by input blobs, you also need to give the shape as input.\n - When the range is given as arguments, this operator enforces min <= max. 
When the range is given as inputs, the constraint is not enforced.\n - When the range is given as inputs and max < min, the first dimension of the output is set to 0. This behavior is allowed so that dynamically sampling indices into a dynamically sized tensor is possible.\n- The shape of the output can be given as argument or input.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop_1 = core.CreateOperator(\n \"UniformIntFill\",\n [],\n [\"output\"],\n min=5,\n max=10,\n shape=(3,3)\n)\n\nop_2 = core.CreateOperator(\n \"UniformIntFill\",\n [\"shape\", \"min\", \"max\"],\n [\"output\"],\n input_as_shape=1\n)\n\n// Test arg-based op\nworkspace.RunOperatorOnce(op_1)\nprint(\"output (op_1):\\n\", workspace.FetchBlob(\"output\"))\n\n// Test input-based op\nworkspace.ResetWorkspace()\nworkspace.FeedBlob(\"shape\", np.array([5,5]))\nworkspace.FeedBlob(\"min\", np.array(13, dtype=np.int32))\nworkspace.FeedBlob(\"max\", np.array(19, dtype=np.int32))\nworkspace.RunOperatorOnce(op_2)\nprint(\"output (op_2):\\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\noutput (op_1):\n [[ 6 10 7]\n [ 5 10 6]\n [ 7 5 10]]\noutput (op_2):\n [[19 13 15 13 13]\n [14 17 14 15 15]\n [17 14 19 13 13]\n [17 18 16 13 18]\n [14 15 16 18 16]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): 1-D tensor of the shape of the output, must be used with `input_as_shape` argument", + "name": "shape" + }, + { + "description": "(*Tensor``*): scalar tensor containing minimum value, inclusive", + "name": "min" + }, + { + "description": "(*Tensor``*): scalar tensor containing maximum value, inclusive", + "name": "max" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): filled output tensor", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "PackRecords", + "schema": { + "attributes": [ + { + "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "description": "\nGiven a dataset under a schema specified by the `fields` argument, pack all\nthe input tensors into one, where each tensor element represents a row of data\n(batch of size 1). This format allows easier use with the rest of Caffe2\noperators.\n", + "outputs": [ + { + "description": "One dimensional tensor having a complex type of SharedTensorVectorPtr. In order to reverse it back to the original input it has to be inserted into UnPackRecordsOp.", + "name": "tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "Conditional", + "schema": { + "description": "\nGiven a 1-D tensor of boolean values, apply conditional operator along the first\ndimension of DataT and DataF and return DataO. 
Note, DataT and DataF must\nhave the exact same shape and type.\n", + "inputs": [ + { + "description": "Boolean tensor to select DataT or DataF", + "name": "Condition" + }, + { + "description": "Data to use when True", + "name": "DataT" + }, + { + "description": "Data to use when False", + "name": "DataF" + } + ], + "outputs": [ + { + "description": "Output data after applying ConditionalOp", + "name": "DataO" + } + ], + "support_level": "default" + } + }, + { + "name": "CopyFromCPUInput", + "schema": { + "description": "\nTake a CPU input tensor and copy it to an output in the current\nContext (GPU or CPU). This may involves cross-device MemCpy.\n", + "inputs": [ + { + "description": "The input CPU tensor.", + "name": "input" + } + ], + "outputs": [ + { + "description": "either a TensorCUDA or a TensorCPU", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "MaxPoolGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CollectAndDistributeFpnRpnProposals", + "schema": { + "attributes": [ + { + "description": "(int) ROI_CANONICAL_SCALE", + "name": "roi_canonical_scale", + "option": "optional" + }, + { + "description": "(int) ROI_CANONICAL_LEVEL", + "name": "roi_canonical_level", + "option": "optional" + }, + { + "description": "(int) ROI_MAX_LEVEL", + "name": "roi_max_level", + "option": "optional" + }, + { + "description": "(int) ROI_MIN_LEVEL", + "name": "roi_min_level", + "option": "optional" + }, + { + "description": "(int) RPN_MAX_LEVEL", + "name": "rpn_max_level", + "option": "optional" + }, + { + "description": "(int) RPN_MIN_LEVEL", + "name": "rpn_min_level", + "option": "optional" + }, + { + "description": "(int) RPN_POST_NMS_TOP_N", + "name": "rpn_post_nms_topN", + "option": "optional" + } + ], + "description": "\nMerge RPN proposals generated at multiple FPN levels and then\ndistribute those proposals to their appropriate FPN levels for Faster RCNN.\nAn anchor at one FPN level may 
predict an RoI that will map to another level,\nhence the need to redistribute the proposals.\n\nOnly inference is supported. To train, please use the original Python\noperator in Detectron.\n\nInputs and outputs are examples only; if min/max levels change,\nthe number of inputs and outputs, as well as their level numbering,\nwill change.\n", + "inputs": [ + { + "description": "RPN proposals for FPN level 2, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn2" + }, + { + "description": "RPN proposals for FPN level 3, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn3" + }, + { + "description": "RPN proposals for FPN level 4, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn4" + }, + { + "description": "RPN proposals for FPN level 5, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn5" + }, + { + "description": "RPN proposals for FPN level 6, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.", + "name": "rpn_rois_fpn6" + }, + { + "description": "RPN objectness probabilities for FPN level 2. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn2" + }, + { + "description": "RPN objectness probabilities for FPN level 3. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn3" + }, + { + "description": "RPN objectness probabilities for FPN level 4. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn4" + }, + { + "description": "RPN objectness probabilities for FPN level 5. See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn5" + }, + { + "description": "RPN objectness probabilities for FPN level 6. 
See rpn_roi_probs documentation from GenerateProposals.", + "name": "rpn_roi_probs_fpn6" + } + ], + "outputs": [ + { + "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)", + "name": "rois" + }, + { + "description": "RPN proposals for ROI level 2, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn2" + }, + { + "description": "RPN proposals for ROI level 3, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn3" + }, + { + "description": "RPN proposals for ROI level 4, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn4" + }, + { + "description": "RPN proposals for ROI level 5, format (image_index, x1, y1, x2, y2)", + "name": "rois_fpn5" + }, + { + "description": "Permutation on the concatenation of all rois_fpni, i=min...max, such that when applied the RPN RoIs are restored to their original order in the input blobs.", + "name": "rois_idx_restore" + } + ], + "support_level": "default" + } + }, + { + "name": "ScatterAssign", + "schema": { + "description": "\nUpdate slices of the tensor in-place by overriding current value.\n\nNote: The op pretty much ignores the exact shapes of the input arguments and\ncares only about sizes. It's done for performance consideration to avoid\nunnecessary reshapes. Only first dimension of X_0 is important, let's call it\nN. 
If M is the total size of X_0 and K is the size of INDICES then X_i is\nassumed to be of shape K x (M / N) regardless of the real shape.\n\nNote: Each update in INDICES is applied independently which means that if\nduplicated elements are present in INDICES arbitrary one will win.\n\nCurrently only works on CPU because of access to INDICES.\n", + "inputs": [ + { + "description": "Tensor to be updated.", + "name": "DATA" + }, + { + "description": "1-D list of indices on the first dimensionof X_0 that need to be updated", + "name": "INDICES" + }, + { + "description": "Update slices, with shape len(INDICES) + shape(X_0)[1:]", + "name": "SLICES" + } + ], + "outputs": [ + { + "description": "Has to be exactly the same tensor as the input 0", + "name": "DATA" + } + ], + "support_level": "default" + } + }, + { + "name": "FusedRandRowwiseQuantizedToFloat", + "schema": { + "description": "\nDe-quantizes the result of the FloatToFusedRandRowwiseQuantized operator.\nRefer FloatToFusedRandRowwiseQuantized operator for details.\n", + "inputs": [ + { + "description": "Fused bitwidth, tail, min, max and quantized data", + "name": "quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_input" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeSingleMapFeatureTensorsGradient", + "schema": { + "description": "Explode given multi-feature tensors with map features into multiple single-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys 
K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".presence", + "name": "in1_presence" + }, + { + "description": ".values.values_grad", + "name": "out_values_values_grad" + } + ], + "outputs": [ + { + "description": ".values_grad", + "name": "in1_values_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "TopKGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SinGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "GivenTensorIntFill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. 
First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "Int8MaxPool", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "MaxPool \nconsumes an input blob X and applies max pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Max pooling consisting of taking the maximum value of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output data tensor from max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Output will go through rectified linear", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Dequantize", + "schema": { + "description": null, + "inputs": [ + { + "description": "Int8 Tensor qX.", + "name": "qX" + } + ], + "outputs": [ + { + "description": "FP32 Tensor that represents mapped real value of qX.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Adagrad", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default 1. 
If it is in (0, 1), the gradient square sum is decayed by this factor.", + "name": "decay", + "option": "optional" + } + ], + "description": "\n\nComputes the AdaGrad update for an input gradient and accumulated\nhistory. Concretely, given inputs (param, grad, moment, learning_rate),\ncomputes\n\n new_moment = moment + square(grad)\n effective_lr = learning_rate / (sqrt(new_moment) + epsilon)\n update = learning_rate * grad / (sqrt(new_moment) + epsilon)\n new_param = param + update\nand returns (new_param, new_moment).\n\nOptionally returns effective_lr and update as well.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "(optional) Effective learning rate", + "name": "output_effective_lr" + }, + { + "description": "(optional) Actual update that is applied.", + "name": "output_update" + } + ], + "support_level": "default" + } + }, + { + "name": "ConstantFill", + "schema": { + "attributes": [ + { + "description": "value to populate output tensor with.", + "name": "value", + "option": "optional" + }, + { + "description": "The data type for the elements of the output tensor. Strictly must be one of the types from *DataType* enum in TensorProto.", + "name": "dtype", + "option": "optional", + "type": "int64" + }, + { + "description": "Shape of the output tensor. Cannot pass an input blob and this arg at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "Additional dimensions appended at the end of the shape indicated by the input blob. 
Cannot set thisargument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": "\nThis operator fills the elements of the output tensor with a constant value\nspecified by the `value` argument.\n\n- The data type is specified by the `dtype` argument\n\n- Currently, the data types supported are *float*, *int32*, *int64*, and *bool*\n\n- If the `dtype` argument is not provided, the data type of `value` is used\n\n- The output tensor shape is either specified by the `shape` argument or will\nmatch the shape of the input tensor if one is provided (if an input tensor is\nprovided, a shape argument should not be set)\n\n- Optional additional dimensions can be appended at the end as specified by\n`extra_shape` argument\n\n- If `input_as_shape` is set to True, the input should be a 1D tensor\ncontaining the desired output shape (the dimensions specified in `extra_shape`\nwill also be appended)\n\nWhen specifying `dtype` argument, use the integer keys from the *DataType* enum\nin TensorProto:\n\n```\nmessage TensorProto {\n ...\n enum DataType {\n UNDEFINED = 0;\n FLOAT = 1; // float\n INT32 = 2; // int\n BYTE = 3; // BYTE, when deserialized, is going to be restored as uint8.\n STRING = 4; // string\n BOOL = 5; // bool\n UINT8 = 6; // uint8_t\n INT8 = 7; // int8_t\n UINT16 = 8; // uint16_t\n INT16 = 9; // int16_t\n INT64 = 10; // int64_t\n FLOAT16 = 12; // at::Half\n DOUBLE = 13; // double\n }\n```\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConstantFill\",\n [],\n [\"Y\"],\n shape=(1,5,5)\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nY: [[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]]\n```\n
\n\n
\n Example 2 \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConstantFill\",\n [\"X\"],\n [\"Y\"],\n value=4.0,\n dtype=1,\n extra_shape=(1,2)\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[86. 30. 84.]\n [34. 51. 9.]\n [29. 86. 59.]]\nY: [[[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]\n\n\n [[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]\n\n\n [[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* [OPTIONAL] Input tensor to provide shape information.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor of constant values.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToFused8BitRowwiseQuantized", + "schema": { + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 8 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.\nFor N-dimensional input tensor, the first N-1 dimensions are interpreted as\nrows and the last dimension is interpreted as a column. For example, an\ninput tensor with dimension 5x2x4 is interpreted as 10 rows and 4 columns.\n)\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Conv3D", + "schema": { + "description": "\nThe convolution operator consumes an input vector, a 3D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. 
Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n// Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n// Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n// Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n// Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n// Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LSTMUnit", + "schema": { + "attributes": [ + { + "description": "Bias term to add in while calculating forget gate", + "name": "forget_bias", + "option": "optional" + }, + { + "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.", + "name": "sequence_lengths", + "option": "optional" + } + ], + "description": "\nLSTMUnit computes the activations of a standard LSTM (without peephole\nconnections), in a sequence-length aware fashion.\n\nConcretely, given the (fused) inputs X (TxNxD), the previous cell\nstate (NxD), and the sequence lengths (N), computes the LSTM\nactivations, avoiding computation if the input is invalid (as in, the\nvalue at X{t][n] >= seqLengths[n].\n\n", + "support_level": "default" + } + }, + { + "name": "SortedSegmentWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "GatherByKey", + "schema": { + "description": "\nInverse operation of Partition.\n\nTakes the original, full 'keys' tensor followed by sharded value tensors,\nand returns the full value tensor, combined using the same hash used in\nPartition.\n", + "inputs": [ + { + "description": "The first input is the full keys tensor (same as the first input of Partition).", + "name": "keys" + 
}, + { + "description": "Subsequented inputs are sharded values tensors.", + "name": "sharded_values" + } + ], + "outputs": [ + { + "description": "Reconstructed values tensor.", + "name": "values" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceMean", + "schema": { + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "description": "\nComputes the **mean** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceMean\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[9. 0. 3. 6. 0.]\n [3. 4. 5. 0. 9.]\n [6. 9. 1. 1. 5.]\n [6. 2. 3. 7. 7.]\n [3. 1. 1. 0. 1.]]\n\n [[4. 3. 9. 8. 1.]\n [8. 2. 0. 4. 0.]\n [8. 9. 9. 0. 2.]\n [7. 2. 5. 8. 9.]\n [5. 9. 1. 9. 0.]]]]\nY:\n[[6.5 1.5 6. 7. 0.5]\n [5.5 3. 2.5 2. 4.5]\n [7. 9. 5. 0.5 3.5]\n [6.5 2. 4. 7.5 8. ]\n [4. 5. 1. 4.5 0.5]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "IntIndexCreate", + "schema": { + "attributes": [ + { + "description": "Max number of elements, including the zero entry.", + "name": "max_elements", + "option": "optional" + } + ], + "description": "\nCreates a dictionary that maps int32 keys to consecutive integers\nfrom 1 to max_elements. Zero is reserved for unknown keys.\n", + "outputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handler" + } + ], + "support_level": "default" + } + }, + { + "name": "Div", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise binary division (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. 
B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Div\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[18,8],[2,9]]))\nworkspace.FeedBlob(\"B\", np.array([[9,2],[3,2]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[18 8]\n [ 2 9]]\nB:\n[[9 2]\n [3 2]]\nC:\n[[2 4]\n [0 4]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "WeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "TimerBegin", + "schema": { + "attributes": [ + { + "description": "(*str*): name of the timer object; if not set use output name", + "name": "counter_name", + "option": "optional" + } + ], + "description": "\nStart a wallclock timer, returning a scalar tensor containing a pointer to it. The timer is stopped by calling **TimerEnd**.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n ", + "outputs": [ + { + "description": "(*Tensor``*): pointer to a timer object", + "name": "timer" + } + ], + "support_level": "default" + } + }, + { + "name": "UnpackSegments", + "schema": { + "attributes": [ + { + "description": "The pre-defined max_length for the packed segments", + "name": "max_length", + "option": "optional" + } + ], + "description": "Map N+1 dim tensor to N dim based on length blob", + "inputs": [ + { + "description": "1-d int/long tensor contains the length in each of the input.", + "name": "lengths" + }, + { + "description": "N+1 dim Tensor.", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "N dim Tensor", + "name": "packed_tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "ThresholdedRelu", + "schema": { + "attributes": [ + { + "description": "(float) defaults to 1.0.", + "name": "alpha", + "option": "optional" + } + ], + "description": 
"\nThresholdedRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = x for x > alpha, y = 0\notherwise, is applied to the tensor elementwise.\n", + "inputs": [ + { + "description": "1D input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseSortedSegmentSum", + "schema": { + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Sum' to each segment. Segments need to be sorted and contiguous. See also\nSparseUnsortedSegmentSum that doesn't have this requirement.\n\nThis op is basically Gather and SortedSegmentSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same length as INDICES and values in the range 0..K-1 and in increasing order that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. 
Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Checkpoint", + "schema": { + "attributes": [ + { + "description": "(int, default 0) if set, use the db path directly and do not prepend the current root folder of the workspace.", + "name": "absolute_path", + "option": "optional" + }, + { + "description": "(string) a template string that one can combine with the iteration to create the final db name. For example, \"/home/lonestarr/checkpoint_%08d.db\"", + "name": "db", + "option": "optional" + }, + { + "description": "(string) the type of the db.", + "name": "db_type", + "option": "optional" + }, + { + "description": "(int, default 1) the checkpointing is carried out when (iter mod every) is zero.", + "name": "every", + "option": "optional" + } + ], + "description": "\nThe Checkpoint operator is similar to the Save operator, but allows one to save\nto db every few iterations, with a db name that is appended with the iteration\ncount. It takes [1, infinity) number of inputs and has no output. The first\ninput has to be a TensorCPU of type int and has size 1 (i.e. the iteration\ncounter). This is determined whether we need to do checkpointing.\n", + "support_level": "default" + } + }, + { + "name": "LengthsIndicesInGradientMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MaxPool1D", + "schema": { + "description": "MaxPool1D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. 
Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Atan", + "schema": { + "description": "\nCalculates the arctangent of the given input tensor, element-wise.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The arctangent of the input tensor computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "PythonDLPack", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CTCBeamSearchDecoder", + "schema": { + "attributes": [ + { + "description": "Maximum number of candidates to carry over to next activation step.", + "name": "beam_width", + "option": "optional" + }, + { + "description": "Probability threshold below which outputs are ignored.", + "name": "prune_threshold", + "option": "optional" + } + ], + "description": "Prefix beam search decoder for connectionist temporal classification.", + "inputs": [ + { + "description": "3D float Tensor sized [max_activation_length, batch_size, alphabet_size] of network logits (before softmax application).", + "name": "INPUTS" + }, + { + "description": "(optional) 1D int vector containing sequence lengths, having size [batch_size] seq_len will be set to max_time if not provided.", + "name": "SEQ_LEN" + } + ], + "outputs": [ + { + "description": "Output_len matrix size (batch_size * num_candidates). Each index stores lengths of candidates for its corresponding batch item.", + "name": "OUTPUT_LEN" + }, + { + "description": "Values vector, size (total_decoded_outputs). The flattened vector of final output sequences, in batch order.", + "name": "VALUES" + }, + { + "description": "Probability vector, size (total_decoded_outputs). 
Each index stores final output probability of its corresponding batch item.", + "name": "OUTPUT_PROB" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSumWithMainInputGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "WeightedSampleDequeueBlobs", + "schema": { + "attributes": [ + { + "description": "Weights for sampling from multiple queues", + "name": "weights", + "option": "optional" + }, + { + "description": "The index of the blob (among the output blob list) that will be used to store the index of the table chosen to read the current batch.", + "name": "table_idx_blob", + "option": "optional" + } + ], + "description": "\nDequeue the blobs from multiple queues. When one of queues is closed and empty,\nthe output status will be set to true which can be used as exit criteria for\nexecution step.\nThe 1st input is the queue and the last output is the status. The rest are\ndata blobs.\n", + "support_level": "default" + } + }, + { + "name": "SigmoidCrossEntropyWithLogitsGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Int8RoIAlign", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "(float) default 1.0; Spatial scale of the input feature map X relative to the input image. E.g., 0.0625 if X has a stride of 16 w.r.t. 
the input image.", + "name": "spatial_scale", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's height.", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "(int) default 1; Pooled output Y's width.", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "(int) default -1; number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If <= 0, then an adaptive number of grid points are used (computed as ceil(roi_width / pooled_w), and likewise for height).", + "name": "sampling_ratio", + "option": "optional" + } + ], + "description": "\nRegion of Interest (RoI) align operation as used in Mask R-CNN.\n", + "inputs": [ + { + "description": "4D Int8 Tensor feature map input of shape (N, C, H, W).", + "name": "X" + }, + { + "description": "2D input of shape (R, 4 or 5) specifying R RoIs representing: batch index in [0, N - 1], x1, y1, x2, y2. The RoI coordinates are in the coordinate system of the input image. For inputs corresponding to a single image, batch index can be excluded to have just 4 columns.", + "name": "RoIs" + } + ], + "outputs": [ + { + "description": "4D Int8 Tensor output of shape (R, C, pooled_h, pooled_w). The r-th batch element is a pooled feature map cooresponding to the r-th RoI.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeLogMeanExpGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LengthsSum", + "schema": { + "description": "\nApplies 'Sum' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. 
Values belonging to the same segment are\naggregated together and considered for the 'Sum' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n\n\nThe *LengthsSum* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the sum in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [sum([2,4]), sum([3,1,2]), sum([10])] = [6,6,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsSum\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 6. 6. 10.]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "DequeueBlobs", + "schema": { + "attributes": [ + { + "description": "Timeout in secs, default: no timeout", + "name": "timeout_secs", + "option": "optional" + } + ], + "description": "\n Dequeue the blobs from queue.\n ", + "inputs": [ + { + "description": "The shared pointer for the BlobsQueue", + "name": "queue" + } + ], + "outputs": [ + { + "description": "The blob to store the dequeued data", + "name": "blob" + } + ], + "support_level": "default" + } + }, + { + "name": "Elu", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Defines alpha parameter used in calculation.", + "name": "alpha", + "option": "optional", + "type": "float32" + } + ], + "description": "\n\nThis op implements the exponential linear unit (ELU) activation function as described in [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289). The op takes an input tensor $X$ of arbitrary shape, computes the elementwise elu operation, and returns a vector $Y$ of the same shape as output. The alpha parameter may be passed as an argument, but defaults to 1. The elu operation is defined as\n\n$$y=f(x) =\\begin{cases}\\alpha(e^x-1) & x < 0 \\\\ x & otherwise\\end{cases}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elu_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Elu\",\n [\"X\"],\n [\"Y\"],\n alpha=1.1\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 0.35339102 1.1860217 -0.10710736]\n [-3.1173866 -0.1889988 -0.20330353]\n [ 1.8525308 -0.368949 0.506277 ]]\n\nY:\n [[ 0.35339102 1.1860217 -0.11172786]\n [-1.0513 -0.18943374 -0.20236646]\n [ 1.8525308 -0.33939326 0.506277 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1D input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "1D input tensor, calculated as described above.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Flatten", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Indicates up to which input dimensions (exclusive) should be flattened to the outer dimension of the output.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nFlattens the input tensor into a 2D matrix. If input tensor has shape\n$(d_0, d_1, ..., d_n)$ then the output will have shape\n$\\bigl((d_0 * d_1 * ... * d_{(axis-1)}), (d_{axis} * d_{(axis+1)} * ... * d_n)\\bigr)$.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/flatten_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Flatten\",\n [\"X\"],\n [\"Y\"],\n axis=1\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(1,3,2,2))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[[[0.53432311 0.23734561]\n [0.56481598 0.52152617]]\n\n [[0.33662627 0.32472711]\n [0.17939016 0.97175851]]\n\n [[0.87226421 0.49045439]\n [0.92470531 0.30935077]]]]\nY: [[0.53432311 0.23734561 0.56481598 0.52152617 0.33662627 0.32472711\n 0.17939016 0.97175851 0.87226421 0.49045439 0.92470531 0.30935077]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input Tensor of rank >= axis.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* A 2D tensor with the contents of the input tensor, with input dimensions up to `axis` flattened to the outer dimension of the output and the remaining input dimensions flattened into the inner dimension of the output.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "MaxPool1DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeLogSumExp", + "schema": { + "description": "\nApplies 'LogSumExp' to each segment of input tensor. In order to allow for more\nefficient implementation of 'LogSumExp', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nLogSumExp computes the element-wise log of the sum of exponentials of input slices. 
Operation doesn't change the shape of individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimentsions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "SumElementsGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "RetrieveCount", + "schema": { + "description": "\nRetrieve the current value from the counter as an integer.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n// Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n// Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n// Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n// Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n// Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' 
after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.", + "name": "counter" + } + ], + "outputs": [ + { + "description": "*(type: int)* Current count value.", + "name": "count" + } + ], + "support_level": "default" + } + }, + { + "name": "CopyRowsToTensorGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "EnsureClipped", + "schema": { + "description": "\nGiven a tensor, apply clip after gradient is applied; when the param is sparse as\nindicated by valid indices and grad, in-place is required\n", + "inputs": [ + { + "description": "Parameters to be normalized", + "name": "param" + }, + { + "description": "Sparse indices, only needed for sparse param", + "name": "indices" + }, + { + "description": "Gradient computed, only needed for sparse param", + "name": "grad" + } + ], + "outputs": [ + { + "description": "param ensured to be clipped within range", + "name": "output_param" + } + ], + "support_level": "default" + } + }, + { + "name": "WeightedSigmoidCrossEntropyWithLogits", + "schema": { + "description": "\nGiven three matrices: logits, targets, weights, all of the same shape,\n(batch_size, num_classes), computes the weighted sigmoid cross entropy between\nlogits and targets. 
Specifically, at each position r,c, this computes\nweights[r, c] * crossentropy(sigmoid(logits[r, c]), targets[r, c]), and then\naverages over each row.\nReturns a tensor of shape (batch_size,) of losses for each example.\n", + "inputs": [ + { + "description": "matrix of logits for each example and class.", + "name": "logits" + }, + { + "description": "matrix of targets, same shape as logits.", + "name": "targets" + }, + { + "description": "matrix of weights, same shape as logits.", + "name": "weights" + } + ], + "outputs": [ + { + "description": "Vector with the total xentropy for each example.", + "name": "xentropy" + } + ], + "support_level": "default" + } + }, + { + "name": "IndexLoad", + "schema": { + "attributes": [ + { + "description": "If set, skips the first entry of the tensor. This allows to load tensors that are aligned with an embedding, where the first entry corresponds to the default 0 index entry.", + "name": "skip_first_entry", + "option": "optional" + } + ], + "description": "\nLoads the index from the given 1-D tensor. Elements in the tensor will be given\nconsecutive indexes starting at 1. Fails if tensor contains repeated elements.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + }, + { + "description": "1-D tensor with elements starting with index 1.", + "name": "items" + } + ], + "outputs": [ + { + "description": "The input handle.", + "name": "handle" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchDenseToSparse", + "schema": { + "description": "\nThis Op is a inverse of BatchSparseToDenseOp.\nBasically, given a `lengths` vector, a `indices` vector,\nand a dense matrix `dense`, output `value` vector so that, along with\n`lengths` vector and `indices` vector, forms a sparse representation\nof the dense matrix.\n\nA sparse matrix is represented by `lengths` vector, `indices` vector,\nand `values` vector. 
Each element in `lengths` vector (lengths[`i`]) represents\nthe number of indices in this batch (batch `i`).\nWith in each batch, `indices` should not have duplicate number.\n\nFor example, with input:\n\n lengths = [2, 3, 1]\n indices = [0, 1, 2, 3, 4, 5]\n output = [[6, 7, 0, 0, 0, 0],\n [0, 0, 8, 9, 10, 0],\n [0, 0, 0, 0, 0, 11]]\n\nThe output is:\n\n values = [6, 7, 8, 9, 10, 11]\n\nafter running this operator.\n", + "inputs": [ + { + "description": "Flatten lengths, Used to break down indices into per batch indices", + "name": "lengths" + }, + { + "description": "Flatten indices, tensor of total size = \\sum lengths, containing the indices ", + "name": "indices" + }, + { + "description": "dense 2-D tensor, first dim = len(lengths), last dim > Any(indices)", + "name": "dense" + } + ], + "outputs": [ + { + "description": "Values, tensor of the same size as `indices` and same data type as dense tensor.", + "name": "values" + } + ], + "support_level": "default" + } + }, + { + "name": "UniformFill", + "schema": { + "attributes": [ + { + "description": "(*float*): minimum value, inclusive", + "name": "min", + "option": "optional" + }, + { + "description": "(*float*): maximum value, inclusive", + "name": "max", + "option": "optional" + }, + { + "description": "(*Tuple(int)*): shape of the output, do not set when `input_as_shape`=1", + "name": "shape", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to use the first input as shape; `shape` input must be in CPU context", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": "\nFill the output tensor with float samples from uniform distribution [`min`, `max`].\n\n- The range can be defined either by arguments or input blobs. `min` and `max` are inclusive.\n - If the range is given by input blobs, you also need to give the shape as input.\n - When the range is given as arguments, this operator enforces min <= max. 
When the range is given as inputs, the constraint is not enforced.\n - When the range is given as inputs and max < min, the first dimension of the output is set to 0. This behavior is allowed so that dynamically sampling indices into a dynamically sized tensor is possible.\n- The shape of the output can be given as argument or input.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop_1 = core.CreateOperator(\n \"UniformFill\",\n [],\n [\"output\"],\n min=5.5,\n max=10.5,\n shape=(3,3)\n)\n\nop_2 = core.CreateOperator(\n \"UniformFill\",\n [\"shape\", \"min\", \"max\"],\n [\"output\"],\n input_as_shape=1\n)\n\n// Test arg-based op\nworkspace.RunOperatorOnce(op_1)\nprint(\"output (op_1):\\n\", workspace.FetchBlob(\"output\"))\n\n// Test input-based op\nworkspace.ResetWorkspace()\nworkspace.FeedBlob(\"shape\", np.array([5,5]))\nworkspace.FeedBlob(\"min\", np.array(13.8, dtype=np.float32))\nworkspace.FeedBlob(\"max\", np.array(19.3, dtype=np.float32))\nworkspace.RunOperatorOnce(op_2)\nprint(\"output (op_2):\\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\noutput (op_1):\n [[8.894862 8.225005 6.7890406]\n [9.588293 7.1072135 7.7234955]\n [8.210596 6.0202913 9.665462 ]]\noutput (op_2):\n [[18.965155 15.603871 15.038921 17.14872 18.134571]\n [18.84237 17.845276 19.214737 16.970337 15.494069]\n [18.754795 16.724329 15.311974 16.962536 18.60965 ]\n [15.186268 15.264773 18.73341 19.077969 14.237255]\n [15.917589 15.844325 16.248466 17.006554 17.502048]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): 1-D tensor of the shape of the output, must be used with `input_as_shape` argument", + "name": "shape" + }, + { + "description": "(*Tensor``*): scalar tensor containing minimum value, inclusive", + "name": "min" + }, + { + "description": "(*Tensor``*): scalar tensor containing maximum value, inclusive", + "name": "max" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): filled output tensor", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "ChannelStats", + "schema": { + "description": "\nGiven an input tensor in NCHW format, computes the sum of all elements per\nchannel and the sum of all elements squared per channel. These values can be\nreduced across multiple batches and used to obtain the mean and variance across\nthe full set of batches. Using the new mean and variance as input to SpatialBN\nhas the effect of changing the batch size over which SpatialBN is applied.\n", + "inputs": [ + { + "description": "The input 4-dimensional tensor of shape NCHW", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output 1-dimensional tensor of size C containing the sum of elements of X per channel.", + "name": "sum" + }, + { + "description": "The output 1-dimensional tensor of size C containing the sum of elements squared per channel.", + "name": "sumsq" + } + ], + "support_level": "default" + } + }, + { + "name": "FCTransposed", + "schema": { + "description": "\nSame as FC, but weight matrix is supposed to be already pretransposed.\nFCTransposed stands for calling blass with no noTrans, noTrans\n", + "support_level": "default" + } + }, + { + "name": "SortedSegmentSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LC2D", + "schema": { + "description": "\nThe locally connected operator consumes an input vector, a 2D filter blob\nand a bias blob and computes the output. 
\nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "description": null, + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "RoIPool", + "schema": { + "attributes": [ + { + "description": "If set, run in test mode and skip computation of argmaxes (used for gradient computation). Only one output tensor is produced. 
(Default: false).", + "name": "is_test", + "option": "optional" + }, + { + "description": "A StorageOrder string (Default: \"NCHW\").", + "name": "order", + "option": "optional" + }, + { + "description": "The pooled output height (Default: 1).", + "name": "pooled_h", + "option": "optional" + }, + { + "description": "The pooled output width (Default: 1).", + "name": "pooled_w", + "option": "optional" + }, + { + "description": "Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling (Default: 1.0).", + "name": "spatial_scale", + "option": "optional" + } + ], + "description": "\nCarries out ROI Pooling for Faster-RCNN.\nDepending on the mode, there are multiple output cases:\n\n Output case #1: Y, argmaxes (train mode)\n Output case #2: Y (test mode)\n", + "inputs": [ + { + "description": "The input 4-D tensor of data. Only NCHW order is currently supported.", + "name": "X" + }, + { + "description": "RoIs (Regions of Interest) to pool over. Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], ...].", + "name": "rois" + } + ], + "outputs": [ + { + "description": "RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_h, pooled_w).", + "name": "Y" + }, + { + "description": "Argmaxes corresponding to indices in X used for gradient computation. Only output if arg \"is_test\" is false.", + "name": "argmaxes" + } + ], + "support_level": "default" + } + }, + { + "name": "ElementwiseLinear", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs; defaults to one because the 0th axis most likely describes the batch size.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nThis op computes the elementwise linear combination of a batch of input vectors with a weight vector and bias vector. 
As input, the op takes an input tensor $X$ of shape $NxD$, a weight vector $w$ of length $D$, and a bias vector $b$ of length $D$. Here, $N$ represents the batch size and $D$ represents the length of the feature vectors. The output, $Y$, is a tensor of shape $NxD$ and is calculated as\n\n$$Y_{ij} = X_{ij}w_j + b_j \\ for \\ i\\in{N}, j\\in{D}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_linear_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_linear_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ElementwiseLinear\",\n [\"X\", \"w\", \"b\"],\n [\"Y\"]\n)\n\n// Create X\nX = np.array([[1,2,3,4,5],[6,8,9,16,10]])\nprint(\"X:\\n\",X)\n\n// Create w\nw = np.array([1,1/2.,1/3.,1/4.,1/5.])\nprint(\"w:\\n\",w)\n\n// Create b\nb = np.array([1.,1.,1.,1.,1.])\nprint(\"b:\\n\",b)\n\n\n// Feed X & w & b into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"w\", w.astype(np.float32))\nworkspace.FeedBlob(\"b\", b.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 1 2 3 4 5]\n [ 6 8 9 16 10]]\nw:\n [1. 0.5 0.33333333 0.25 0.2]\nb:\n [1. 1. 1. 1. 1.]\nY:\n [[2. 2. 2. 2. 2.]\n [7. 5. 4. 5. 3.]]\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "2D input tensor of size $NxD$. This input represents the input data to be operated on.", + "name": "X" + }, + { + "description": "1D scaling factors, or weights, of size $D$. This input contains the weights that will be multiplied by the data.", + "name": "w" + }, + { + "description": "1D biases of size $D$. This input contains the biases that will be added to the products of the weights and data.", + "name": "b" + } + ], + "outputs": [ + { + "description": "2D output tensor of size $NxD$. Calculated as described above.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsSplit", + "schema": { + "attributes": [ + { + "description": "Number of splits for each element in LENGTHS", + "name": "n_split", + "option": "optional" + } + ], + "description": "\nGiven input vector LENGTHS, and input n_split, LengthsSplit returns\na single output vector. It \"splits\" each length into n_split values which add\nup to the original length. It will attempt to do equal splits, and if not possible,\nit orders larger values first. If the n_split is larger than the length, zero\npadding will be applied.\n\ne.g. LENGTHS = [9 4 5]\n n_split = 3\n Y = [3 3 3 2 1 1 2 2 1]\n\ne.g. LENGTHS = [2, 1, 2]\n n_split = 3\n Y = [1 1 0 1 0 0 1 1 0]\n", + "inputs": [ + { + "description": "Mx1 Input tensor denoting INT32 lengths", + "name": "LENGTHS" + }, + { + "description": "(Optional) Number of splits for each element in LENGTHS (overrides argument)", + "name": "n_split" + } + ], + "outputs": [ + { + "description": "(M*n_split)x1 Output vector denoting split lengths", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSum", + "schema": { + "attributes": [ + { + "description": "Produce also gradient for `weights`. 
For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'WeightedSum' to each segment. Segments are defined by their LENGTHS.\n\nThis op is basically Gather and LengthsWeightedSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nLENGTHS is a vector that defines slice sizes by first dimension of DATA. Values\nbelonging to the same segment are aggregated together. sum(LENGTHS) has\nto match INDICES size.\n\nThe first dimension of the output is equal to the number of input segment,\ni.e. `len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Non negative vector with sum of elements equal to INDICES length", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. 
Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Sub", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise binary subtraction (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Sub\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[10,12],[4,14]]))\nworkspace.FeedBlob(\"B\", np.array([[5,16],[1,19]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[10 12]\n [ 4 14]]\nB:\n[[ 5 16]\n [ 1 19]]\nC:\n[[ 5 -4]\n [ 3 -5]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "Adadelta", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default 0.95, the squared gradient sum is decayed by this factor.", + "name": "decay", + "option": "optional" + } + ], + "description": "\n\nComputes the AdaDelta update (https://arxiv.org/abs/1212.5701) for an input\ngradient and accumulated history of squared gradients. Concretely, given\ninputs (param, moment, moment_delta, grad, learning_rate), computes:\n\n new_moment = moment * decay + square(grad) * (1 - decay)\n new_grad = sqrt(moment_delta + epsilon) / sqrt(new_moment + epsilon) * grad\n new_param = param + learning_rate * new_grad\n new_moment_delta = moment_delta * decay + square(new_grad) * (1 - decay)\n\nand returns (new_param, new_moment, new_moment_delta).\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Average of squared gradients", + "name": "moment" + }, + { + "description": "Average of squared parameter updates", + "name": "moment_delta" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "Learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated average squared gradient", + "name": "output_moment" + }, + { + "description": "Updated average of squared parameter updates", + "name": 
"output_moment_delta" + } + ], + "support_level": "default" + } + }, + { + "name": "If", + "schema": { + "attributes": [ + { + "description": "Net executed when condition is true", + "name": "then_net", + "option": "optional" + }, + { + "description": "Net executed when condition is false (optional)", + "name": "else_net", + "option": "optional" + } + ], + "description": "\n'If' control operator, first input is a scalar boolean blob that stores condition\nvalue. Accepts 'then_net' (required) and 'else_net' (optional) arguments for 'then' and\n'else' subnets respectively. Subnets are executed in the same workspace as 'If'.\n ", + "inputs": [ + { + "description": "Scalar boolean condition", + "name": "condition" + } + ], + "support_level": "default" + } + }, + { + "name": "IntegralImage", + "schema": { + "description": "\nComputes an integral image, which contains the sum of pixel values within\nan image vertically and horizontally. This integral image can then be used\nwith other detection and tracking techniques.\n", + "inputs": [ + { + "description": "Images tensor of the form (N, C, H, W)", + "name": "X" + } + ], + "outputs": [ + { + "description": "Integrated image of the form (N, C, H+1, W+1)", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsWeightedSum", + "schema": { + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "description": "\nApplies 'WeightedSum' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. 
Values belonging to the same segment are\naggregated together and considered for the 'WeightedSum' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n\n\nThe *LengthsWeightedSum* op takes three inputs *DATA*, *LENGTHS*, and *SCALARS*, and produces a single output *OUTPUT*. The op finds the weighted sum in each of the segments of *DATA*, where segments are defined by their lengths. Before calculating the sums, the input *DATA* is weighted by the contents of *SCALARS*.\nFor example, if $DATA = [2,4,3,1,2,10]$, $SCALARS = [8, 2, 1, 4, 1, 0.6]$, and $LENGTHS = [2,3,1]$, then $OUTPUT = [sum([8*2,2*4]), sum([1*3,4*1,1*2]), sum([0.6*10])] = [24,9,6]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsWeightedSum\",\n [\"DATA\", \"SCALARS\",\"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"SCALARS\", np.array([8, 2, 1, 4, 1, 0.6]).astype(np.float32))\nprint(\"SCALARS:\\n\", workspace.FetchBlob(\"SCALARS\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nSCALARS:\n [8. 2. 1. 4. 1. 0.6]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [24. 9. 6.]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Print", + "schema": { + "attributes": [ + { + "description": "(bool) if 1, saves contents to the root folder of the current workspace, appending the tensor contents to a file named after the blob name. Otherwise, logs to stderr.", + "name": "to_file", + "option": "optional" + }, + { + "description": "(int, default 0) If set, prints the first `limit` elements of tensor. If 0, prints the first `k_limit_default`(1000) elements of tensor", + "name": "limit", + "option": "optional" + }, + { + "description": "(int, default 1) Print tensor every `every_n` runs", + "name": "every_n", + "option": "optional" + } + ], + "description": "Logs shape and contents of input tensor to stderr or to a file.", + "inputs": [ + { + "description": "The tensor to print.", + "name": "tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceFrontMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "AsinGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseToDenseMask", + "schema": { + "attributes": [ + { + "description": "list(int) argument with desired ids on the 'dense' output dimension", + "name": "mask", + "option": "optional" + }, + { + "description": "bool whether to return presence mask, false by default", + "name": "return_presence_mask", + "option": "optional" + } + ], + "description": "\nConvert sparse 
representations to dense with given indices.\n\nTransforms a sparse representation of map represented as `indices`\nvector and `values` tensor into a compacted tensor where the first dimension\ncorresponds to each id provided in mask argument. Missing values are filled with\nthe value of `default_value`. After running this op:\n\n output[j, :] = values[i] // where mask[j] == indices[i]\n output[j, ...] = default_value // when mask[j] doesn't appear in indices\n\nIf `lengths` is provided and not empty, and extra \"batch\" dimension is prepended\nto the output.\n\n`values` and `default_value` can have additional matching dimensions, operation\nis performed on the entire subtensor in thise case.\n\nFor example, if `lengths` is supplied and `values` is 1-D vector of floats and\n`default_value` is a float scalar, the output is going to be a float matrix\nof size `len(lengths) X len(mask)`\n", + "inputs": [ + { + "description": "1-D int32/int64 tensor of concatenated ids of data", + "name": "indices" + }, + { + "description": "Data tensor, first dimension has to match `indices`", + "name": "values" + }, + { + "description": "Default value for the output if the id is not present in `indices`. Must have the same type as `values` and the same shape, but without the first dimension", + "name": "default_value" + }, + { + "description": "Optional lengths to represent a batch of `indices` and `values`.", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Output tensor of the same type as `values` of shape `[len(lengths), len(mask)] + shape(default_value)` (if `lengths` is not provided the first dimension is omitted)", + "name": "output" + }, + { + "description": "Bool tensor of shape `[len(lengths), len(mask)]` (if `lengths` is not provided the first dimension is omitted). 
True when a value for given id was present, false otherwise.", + "name": "presence_mask" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseToDenseMaskGradient", + "schema": { + "description": "\nThe output is the gradient of the input value from SparseToDenseMask. The\ngradient for default_value has not been implemented.\n", + "support_level": "default" + } + }, + { + "name": "LC3D", + "schema": { + "description": "\nThe locally connected operator consumes an input vector, a 3D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. 
As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n", + "inputs": [ + { + "description": null, + "name": null + }, + { + "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.", + "name": "filter" + }, + { + "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "UpsampleBilinear", + "schema": { + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "description": "\nResizes the spatial dimensions of the input using bilinear\ninterpolation. 
The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(output_height * height_scale)\n", + "inputs": [ + { + "description": "Input tensor", + "name": "X" + }, + { + "description": "1D, 2-element, Scales tensor, [height_scale, width_scale]", + "name": "scales" + } + ], + "outputs": [ + { + "description": "Output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Append", + "schema": { + "description": "\nAppend input `B` to the end of input `A`.\n\n- It is required that this operation run in-place, meaning that the input `A` blob must match the output blob.\n- All except the outer-most dimension must be the same between `A` and `B`.\n- Input `A` may have to be re-allocated in order for accommodate to the new size. Currently, an exponential growth ratio is used in order to ensure amortized constant time complexity.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dataset_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Append\",\n [\"A\", \"B\"],\n [\"A\"],\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(1,3,3)))\nworkspace.FeedBlob(\"B\", np.random.randint(10, size=(2,3,3)))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"A:\", workspace.FetchBlob(\"A\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[[3 8 7]\n [1 6 6]\n [5 0 6]]]\nB:\n[[[4 3 1]\n [7 9 6]\n [9 4 5]]\n\n [[7 7 4]\n [9 8 7]\n [1 6 6]]]\nA:\n[[[3 8 7]\n [1 6 6]\n [5 0 6]]\n\n [[4 3 1]\n [7 9 6]\n [9 4 5]]\n\n [[7 7 4]\n [9 8 7]\n [1 6 6]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): base input tensor of shape $(N, d_1, d_2, ..., d_n)$", + "name": "A" + }, + { + "description": "(*Tensor*): second input tensor of shape $(M, d_1, d_2, ..., d_n)$ to be appended to the base", + "name": "B" + } + ], + "outputs": [ + { + "description": "(*Tensor*): output tensor of shape $(N+M, d_1, d_2, ..., d_n)$", + "name": "A" + } + ], + "support_level": "default" + } + }, + { + "name": "AtomicIter", + "schema": { + "description": "\nSimilar to Iter, but takes a mutex as the first input to make sure that\nupdates are carried out atomically. This can be used in e.g. Hogwild sgd\nalgorithms.\n", + "inputs": [ + { + "description": "The mutex used to do atomic increment.", + "name": "mutex" + }, + { + "description": "The iter counter as an int64_t TensorCPU.", + "name": "iter" + } + ], + "support_level": "default" + } + }, + { + "name": "StatRegistryUpdate", + "schema": { + "description": "\nUpdate the given StatRegistry, or the global StatRegistry,\nwith the values of counters for the given keys.\n", + "inputs": [ + { + "description": "1D string tensor with the key names to update.", + "name": "keys" + }, + { + "description": "1D int64 tensor with the values to update.", + "name": "values" + }, + { + "description": "If provided, update the given StatRegistry. Otherwise, update the global singleton.", + "name": "handle" + } + ], + "support_level": "default" + } + }, + { + "name": "EnqueueRebatchingQueue", + "schema": { + "attributes": [ + { + "description": "Are we enqueuing a batch or just a single element. By default we enqueue single element.", + "name": "enqueue_batch", + "option": "optional" + } + ], + "description": "\nEnqueues Tensors into the queue.\nNumber of input tensors should be equal to the number of components passed\nduring creation of the queue.\nIf the Queue is closed this operation will fail.\nIf enqueue_batch argument is set. 
We will split the input tensors by the\nfirst dimension to produce single queue elements.\n", + "inputs": [ + { + "description": "object representing the queue", + "name": "queue" + }, + { + "description": "First tensor to enque. ", + "name": "tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseAdam", + "schema": { + "attributes": [ + { + "description": "Default 0.9", + "name": "beta1", + "option": "optional" + }, + { + "description": "Default 0.999", + "name": "beta2", + "option": "optional" + }, + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default false", + "name": "enableRAdam", + "option": "optional" + } + ], + "description": "\n\n Computes the Adam Update for the sparse case.\n Given inputs (param, moment1, moment2, indices, grad, lr, iter), runs the dense\n Adam on (param, moment1[indices], momemnt2[indices], lr, iter) and returns\n (new_param, new_moment1, new_moment2) as in dense case.\n Adam can be customized as Rectified Adam (RAdam) by setting enableRAdam = true.\n\n ", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "First moment history", + "name": "moment_1" + }, + { + "description": "Second moment history", + "name": "moment_2" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "iteration number", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated first moment", + "name": "output_moment_1" + }, + { + "description": "Updated second moment", + "name": "output_moment_2" + }, + { + "description": "Optional Effective gradient", + "name": "output_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "NormalizePlanarYUV", + "schema": { + "description": 
null, + "support_level": "default" + } + }, + { + "name": "EnsureCPUOutput", + "schema": { + "description": "\nThis Op always create TensorCPU output, and may involves cross-device MemCpy.\nUnder CPU Context, this Op takes TensorCPU as input. Under the CUDA Context,\nthis Op accepts either CUDA or CPU Tensor input.\n", + "inputs": [ + { + "description": "The input CUDA or CPU tensor.", + "name": "input" + } + ], + "outputs": [ + { + "description": "TensorCPU that is a copy of the input.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "FindDuplicateElements", + "schema": { + "description": "\nThe *FindDuplicateElements* op takes a single 1-D tensor *data* as input and returns a single 1-D output tensor *indices*. The output tensor contains the indices of the duplicate elements of the input, excluding the first occurrences. If all elements of *data* are unique, *indices* will be empty.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/find_duplicate_elements_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/find_duplicate_elements_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FindDuplicateElements\",\n [\"data\"],\n [\"indices\"],\n)\n\nworkspace.FeedBlob(\"data\", np.array([8,2,1,1,7,8,1]).astype(np.float32))\nprint(\"data:\\n\", workspace.FetchBlob(\"data\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"indices: \\n\", workspace.FetchBlob(\"indices\"))\n\n```\n\n**Result**\n\n```\n\ndata:\n [8. 2. 1. 1. 7. 8. 1.]\nindices:\n [3 5 6]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "a 1-D tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Indices of duplicate elements in data, excluding first occurrences.", + "name": "indices" + } + ], + "support_level": "default" + } + }, + { + "name": "BernoulliJSDGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Conv1D", + "schema": { + "description": "\nThe convolution operator consumes an input vector, a 1D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. 
The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n// Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n// Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n// Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n// Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n// Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.", + "name": "X" + }, + { + "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.", + "name": "filter" + }, + { + "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.", + "name": "bias" + } + ], + "outputs": [ + { + "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsPad", + "schema": { + "attributes": [ + { + "description": "The value to pad the data", + "name": "padding_value", + "option": "optional" + }, + { + "description": "The target length of each segment", + "name": "target_length", + "option": "optional" + } + ], + "description": "\nGiven DATA tensor of rank r >= 1, and LENGTHS tensor of rank 1, pad each\nsegment in DATA with `value`, so that each segment's length is `target_length`.\nIf will throw, if there is segment of length larger than `target_length`.\n\nExample:\n DATA = [\n [2.3, 3.4],\n [4.5, 5.7],\n [6.8, 7.9],\n ]\n LENGTHS = [0, 1, 1, 1]\n and target_length = 2, padding value = -1.0\n OUTPUT = [\n [-1.0, -1.0],\n [-1.0, -1.0],\n [2.3, 3.4],\n [-1.0, -1.0],\n [4.5, 5.7],\n [-1.0, -1.0],\n [6.8, 7.9],\n [-1.0, -1.0],\n ]\n", + "inputs": [ + { + "description": "Tensor of rank r >= 1. 
First dimension must be equal to the size of lengths", + "name": "DATA" + }, + { + "description": "Tensor of int32 lengths of rank 1", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Padded DATA tensor", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "ReadNextBatch", + "schema": { + "attributes": [ + { + "description": "Number of top-level entries to read.", + "name": "batch_size", + "option": "optional" + } + ], + "description": "\nRead the next batch of examples out of the given cursor and data blobs.\n\nInput(0) is a blob pointing to a TreeCursor, and\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nReadNextBatch is thread safe.\n", + "inputs": [ + { + "description": "A blob containing a pointer to the cursor.", + "name": "cursor" + }, + { + "description": "First dataset field", + "name": "dataset_field_0" + } + ], + "outputs": [ + { + "description": "Tensor containing the next batch for field 0.", + "name": "field_0" + } + ], + "support_level": "default" + } + }, + { + "name": "IntegralImageGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Cast", + "schema": { + "attributes": [ + { + "description": "Data type to which the elements of the input tensor are cast. Strictly must be one of the types from *DataType* enum in TensorProto.", + "name": "to", + "option": "optional", + "type": "int64" + } + ], + "description": "\nCasts the elements of a given input tensor to a data type specified by the `to`\nargument and returns an output tensor of the same size in the converted type.\nThe `to` argument must be one of the data types specified in the *DataType*\nenum field in the TensorProto message (see below). 
If the `to` argument is not\nprovided or is not one of the enumerated types in *DataType*, Caffe2 throws an\nEnforce error.\n\nNOTE: Casting from strings is not supported, and casting to strings is only\nsupported on CPU.\n\nTensorProto *DataType* field:\n```\nmessage TensorProto {\n ...\n enum DataType {\n UNDEFINED = 0;\n FLOAT = 1; // float\n INT32 = 2; // int\n BYTE = 3; // BYTE, when deserialized, is going to be restored as uint8.\n STRING = 4; // string\n BOOL = 5; // bool\n UINT8 = 6; // uint8_t\n INT8 = 7; // int8_t\n UINT16 = 8; // uint16_t\n INT16 = 9; // int16_t\n INT64 = 10; // int64_t\n FLOAT16 = 12; // at::Half\n DOUBLE = 13; // double\n }\n```\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cast_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cast\",\n [\"X\"],\n [\"Y\"],\n to=2\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32)*10)\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[9.436466 5.8529844 0.54932857]\n [1.1583444 2.9936118 0.22950427]\n [3.9143739 3.4040766 8.905341 ]]\nY: [[9 5 0]\n [1 2 0]\n [3 3 8]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor to be cast.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor`<'to' type>`)* Output tensor with the same shape as input with type specified by the `to` argument.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Barrier", + "schema": { + "description": "\nDoes a barrier operation among the nodes.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseToDense", + "schema": { + "description": "\nConvert sparse representations to dense with given indices.\n\nTransforms a sparse representation of map represented as `indices`\nvector and `values` tensor into a compacted tensor where the first dimension\nis determined by the first dimension of the 3rd input if it is given or the\nmax index. Missing values are filled with zeros.\n\nThe op supports duplicated indices and performs summation over corresponding\nvalues. This behavior is useful for converting GradientSlices into dense\nrepresentation.\n\nAfter running this op:\n\n output[indices[i], :] += values[i] // sum over all indices[i] equal to the index\n output[j, ...] 
= 0 if j not in indices\n", + "inputs": [ + { + "description": "1-D int32/int64 tensor of concatenated ids of data", + "name": "indices" + }, + { + "description": "Data tensor, first dimension has to match `indices`, basic numeric types are supported", + "name": "values" + }, + { + "description": "Optional: if provided, the first dimension of output is the first dimension of this tensor.", + "name": "data_to_infer_dim" + } + ], + "outputs": [ + { + "description": "Output tensor of the same type as `values` of shape `[len(lengths), len(mask)] + shape(default_value)` (if `lengths` is not provided the first dimension is omitted)", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Iter", + "schema": { + "description": "\nStores a singe integer, that gets incremented on each call to Run().\nUseful for tracking the iteration count during SGD, for example.\n", + "support_level": "default" + } + }, + { + "name": "SparseLengthsMean8BitsRowwise", + "schema": { + "description": "\nVariation of SparseLengthsMean operator, where DATA is\nstored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator). 
To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "LE", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise less or equal than comparison **<=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. 
B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True False True True True True]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "Split", + "schema": { + "attributes": [ + { + "description": "(*int*): axis to split on", + "name": "axis", + "option": "optional" + }, + { + "description": "Pass non-zero integer to remove the axis specified in `axis` to all input tensors.", + "name": "add_axis", + "option": "optional", + "type": "int64" + }, + { + "description": "(*Tuple(int)*): length of each output", + "name": "split", + "option": "optional" + }, + { + "description": "(*string*): order of dimensions of input and output blobs; either \"NCHW\" or \"NHWC\"", + "name": "order", + "option": "optional" + } + ], + "description": "\nSplit an `input` tensor into a list of tensors, along the axis specified by the `axis` dimension. The lengths of the split can be specified using argument `split` or optional second input blob to the operator. Otherwise, the tensor is split to equal sized parts.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Split\",\n [\"input\"],\n [\"output_0\",\"output_1\",\"output_2\"],\n split=(3,2,4),\n axis=0\n)\n\nworkspace.FeedBlob(\"input\", np.random.randint(10, size=(9)))\nprint(\"input:\", workspace.FetchBlob(\"input\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output_0:\", workspace.FetchBlob(\"output_0\"))\nprint(\"output_1:\", workspace.FetchBlob(\"output_1\"))\nprint(\"output_2:\", workspace.FetchBlob(\"output_2\"))\n\n```\n\n**Result**\n\n```\n\ninput: [2 2 6 6 6 0 5 7 4]\noutput_0: [2 2 6]\noutput_1: [6 6]\noutput_2: [0 5 7 4]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor*): tensor to split", + "name": "input" + }, + { + "description": "(*Tensor``*): [OPTIONAL] list of output lengths (see also arg `split`)", + "name": "split" + } + ], + "outputs": [ + { + "description": "(*Tensor*): output tensor", + "name": "[output_0, output_1, ...]" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsToWeights", + "schema": { + "attributes": [ + { + "description": "n of 1/pow(length,n) for normalization", + "name": "power", + "option": "optional" + } + ], + "description": "\nSimilar as LengthsToSegmentIds but output vector of segment\nweights derived by lengths. i.e 1/pow(length, power)\n", + "inputs": [ + { + "description": "1-D int32_t or int64_t tensor of lengths", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "1-D float tensor of weights by length", + "name": "a vector of weights" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateAtomicBool", + "schema": { + "description": "Create an unique_ptr blob to hold an atomic", + "outputs": [ + { + "description": "Blob containing a unique_ptr>", + "name": "atomic_bool" + } + ], + "support_level": "default" + } + }, + { + "name": "RsqrtGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "GatherRanges", + "schema": { + "description": "\nGiven DATA tensor of rank 1, and RANGES tensor of rank 3, gather\ncorresponding ranges into a 1-D tensor OUTPUT.\n\nRANGES dimentions description:\n1: represents list of examples within a batch\n2: represents list features\n3: two values which are start and length or a range (to be applied on DATA)\n\nAnother output LENGTHS represents each example length within OUTPUT\n\nExample:\n DATA = [1, 2, 3, 4, 5, 6]\n RANGES = [\n [\n [0, 1],\n [2, 2],\n ],\n [\n [4, 1],\n [5, 1],\n ]\n ]\n OUTPUT = [1, 3, 4, 5, 6]\n LENGTHS = [3, 2]\n", + "inputs": [ + { + "description": "Tensor of rank 1.", + "name": "DATA" + }, + { + 
"description": "Tensor of int32/int64 ranges, of dims (N, M, 2). Where N is number of examples and M is a size of each example. Last dimension represents a range in the format (start, lengths)", + "name": "RANGES" + } + ], + "outputs": [ + { + "description": "1-D tensor of size sum of range lengths", + "name": "OUTPUT" + }, + { + "description": "1-D tensor of size N with lengths over gathered data for each row in a batch. sum(LENGTHS) == OUTPUT.size()", + "name": "LENGTHS" + } + ], + "support_level": "default" + } + }, + { + "name": "CrossEntropy", + "schema": { + "description": "\nThis operator computes the cross entropy between a $NxD$ dimensional input data tensor $X$ and a $NxD$ dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows\n\n$$Y_i = \\sum_j (label_{ij} * log(X_{ij}))$$\n\nwhere ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"CrossEntropy\",\n [\"X\", \"label\"],\n [\"Y\"]\n)\n\n// Create X: Sample softmax output for 5-class model\nX = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]])\nprint(\"X:\\n\",X)\n\n// Create label: Sample 1-hot ground truth label vectors\nlabel = np.array([[0.,0.,0.,0.,1.],[0.,0.,1.,0.,0.]])\nprint(\"label:\\n\",label)\n\n// Feed X & label into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"label\", label.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[0.01 0.05 0.02 0.02 0.9 ]\n [0.03 0.1 0.42 0.05 0.4 ]]\nlabel:\n [[0. 0. 0. 0. 1.]\n [0. 0. 1. 0. 0.]]\nY:\n [0.10536055 0.8675006 ]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes.", + "name": "X" + }, + { + "description": "Blob containing the labels used to compare the input. $label$ is the same shape as $X$.", + "name": "label" + } + ], + "outputs": [ + { + "description": "Output blob from the cross entropy computation. $Y$ is 1D length $N$ tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceBackMaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReduceBackMean", + "schema": { + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "description": "\nReduces the input tensor along the last dimension of the by applying **mean**.\n\nCan reduce more than one of the \"last\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the mean operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [mean(1,5), mean(4,1,8), mean(2)] = [3, 4.333, 2]$\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_mean_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceBackMean\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[5. 9. 0.]\n [8. 4. 0.]\n [2. 2. 4.]]\n\n [[9. 0. 9.]\n [7. 9. 7.]\n [1. 0. 2.]]]]\nY: [[3.7777777 4.888889 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ResizeNearest", + "schema": { + "attributes": [ + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "description": "\nResizes the spatial dimensions of the input using nearest neighbor\ninterpolation. The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(output_height * height_scale)\n", + "inputs": [ + { + "description": "Input tensor", + "name": "X" + }, + { + "description": "1D, 2-element, Scales tensor, [height_scale, width_scale]", + "name": "scales" + } + ], + "outputs": [ + { + "description": "Output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseDropoutWithReplacement", + "schema": { + "attributes": [ + { + "default": 0.0, + "description": "Probability of an element to be replaced.", + "name": "ratio", + "option": "optional", + "type": "float32" + }, + { + "default": 0, + "description": "Value elements are replaced with.", + "name": "replacement_value", + "option": "optional", + "type": "int64" + } + ], + "description": "\n\n`SparseDropoutWithReplacement` takes a 1-d input tensor and a lengths tensor.\nValues in the Lengths tensor represent how many input elements consitute each\nexample in a given batch. The set of input values for an example will be\nreplaced with the single dropout value with probability given by the `ratio`\nargument.\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SparseDropoutWithReplacement\",\n [\"X\", \"Lengths\"],\n [\"Y\", \"OutputLengths\"],\n ratio=0.5,\n replacement_value=-1\n)\n\nworkspace.FeedBlob(\"X\", np.array([1, 2, 3, 4, 5]).astype(np.int64))\nworkspace.FeedBlob(\"Lengths\", np.array([2, 3]).astype(np.int32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Lengths:\", workspace.FetchBlob(\"Lengths\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"OutputLengths:\", workspace.FetchBlob(\"OutputLengths\"))\n```\n\n**Result**\n\n```\nX: [1, 2, 3, 4, 5]\nLengths: [2, 3]\nY: [1, 2, -1]\nOutputLengths: [2, 1]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor.", + "name": "X" + }, + { + "description": "*(type: Tensor``)* Lengths tensor for input.", + "name": "Lengths" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + }, + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "OutputLengths" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseWngrad", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nThis operator implement the optimization algorithm\nin https://arxiv.org/abs/1803.02865 by Wu, Ward and Bottou.\nGiven inputs (param, seq_b, indices, grad, lr), runs the dense WnGrad\nupdate on (param, grad, seq_b, lr), and returns (new_param,\nnew_seq_b) as in the dense case.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "seq_b history", + "name": "seq_b" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated seq_b", + "name": "output_seq_b" + } + ], + "support_level": "default" + } + }, + { + "name": "Find", + "schema": { + "attributes": [ + { + "description": "Placeholder for items that are not found", + "name": "missing_value", + "option": "optional" + } + ], + "description": "\nFinds elements of second input from first input,\noutputting the last (max) index for each query.\nIf query not find, inserts missing_value.\nSee IndexGet() for a version that modifies the index when\nvalues are not found.\n", + "inputs": [ + { + "description": "Index (integers)", + "name": "index" + }, + { + "description": "Needles / query", + "name": "query" + } + ], + "outputs": 
[ + { + "description": "Indices of the needles in index or 'missing value'", + "name": "query_indices" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchBucketOneHot", + "schema": { + "description": "\nInput is a matrix tensor. Its first dimension is the batch\nsize. For each column, bucketize it based on the boundary values and then do\none hot encoding. The `lengths` specifies the number of boundary values for each\ncolumn. The final number of buckets is this number plus 1. This would also be\nthe expanded feature size. `boundaries` specifies all the boundary values.\nNote that each bucket is right-inclusive. That is, given boundary values\n[b1, b2, b3], the buckets are defined as (-int, b1], (b1, b2], (b2, b3], (b3, inf).\nFor example\n\n data = [[2, 3], [4, 1], [2, 5]], lengths = [2, 3],\n If boundaries = [0.1, 2.5, 1, 3.1, 4.5], then\n output = [[0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 0, 0, 1]]\n\n If boundaries = [0.1, 2.5, 1, 1, 3.1], then\n output = [[0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]]\n\n", + "inputs": [ + { + "description": "input tensor matrix", + "name": "data" + }, + { + "description": "the size is the same as the width of the `data`", + "name": "lengths" + }, + { + "description": "bucket boundaries", + "name": "boundaries" + } + ], + "outputs": [ + { + "description": "output matrix that expands each input column with one hot encodingbased on the bucketization", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "PairWiseLossGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ExpandGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "BatchGather", + "schema": { + "description": "\nBatch gather operation, first dimension in DATA is the batch size.\nGiven DATA tensor of rank r >= 2, and INDICES tensor of rank q >= 1, gather\nentries of the second outer dimension 
(axis == 1) of DATA indexed by INDICES,\nand concatenate them in an output tensor of rank q + (r - 1).\n\nExample:\n DATA = [\n [1.0, 1.2, 2.4, 4.5],\n [2.3, 3.4, 3.6, 2.3],\n [4.5, 5.7, 1.2, 4.5],\n ]\n INDICES = [0, 2]\n\n OUTPUT = [\n [1.0, 2.4],\n [2.3, 3.6],\n [4.5, 1.2],\n ]\n", + "inputs": [ + { + "description": "Tensor of rank r >= 2.", + "name": "DATA" + }, + { + "description": "Tensor of int32/int64 indices, of any rank q.", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "Tensor of rank q + (r - 1).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "Float16ConstantFill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "value", + "option": "optional" + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "option": "optional" + } + ], + "description": null, + "outputs": [ + { + "description": "Output tensor of constant values specified by 'value'", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseSortedSegmentWeightedSum", + "schema": { + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'WeightedSum' to each segment. Segments need to be sorted and contiguous. See also\nSparseUnsortedSegmentWeightedSum that doesn't have this requirement.\n\nThis op is basically Gather and SortedSegmentWeightedSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. 
SEGMENT_IDS should have the same dimension as INDICES.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same length as INDICES and values in the range 0..K-1 and in increasing order that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "DequeueRebatchingQueue", + "schema": { + "attributes": [ + { + "description": "Number of elements to dequeue. 
By default we dequeue one element.", + "name": "num_elements", + "option": "optional" + } + ], + "description": "\nDequeue Tensors from the Queue.\nIf the Queue is closed this might return less elements than asked.\nIf num_elements > 1 the returned elements will be concatenated into one\ntensor per component.\n", + "inputs": [ + { + "description": "object representing the queue", + "name": "rebatching_queue" + }, + { + "description": "First tensor to enqueue", + "name": "tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "StoreGet", + "schema": { + "attributes": [ + { + "description": "alternative key for the blob (optional)", + "name": "blob_name", + "option": "optional" + } + ], + "description": "\nGet a blob from a store. The key is the output blob's name. The key\ncan be overridden by specifying the 'blob_name' argument.\n", + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + } + ], + "outputs": [ + { + "description": "data blob", + "name": "data" + } + ], + "support_level": "default" + } + }, + { + "name": "MergeMultiScalarFeatureTensors", + "schema": { + "description": "Merge given multi-feature tensors with scalar features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": 
".keys", + "name": "in1_keys" + }, + { + "description": ".values", + "name": "in1_values" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values", + "name": "out_values" + } + ], + "support_level": "default" + } + }, + { + "name": "ReluNGradient", + "schema": { + "attributes": [ + { + "description": "the cap of forward op output", + "name": "n", + "option": "optional" + } + ], + "description": "\nReluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the rectified linear function.\n", + "support_level": "default" + } + }, + { + "name": "PadImageGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Assert", + "schema": { + "attributes": [ + { + "description": "(*string*): custom error message to be thrown when the input does not pass assertion", + "name": "error_msg", + "option": "optional" + } + ], + "description": "\nTakes in a tensor of type *bool*, *int*, *long*, or *long long* and checks if all values are True when coerced into a boolean. In other words, for non-bool types this asserts that all values in the tensor are non-zero. If a value is False after coerced into a boolean, the operator throws an error. Else, if all values are True, nothing is returned. For tracability, a custom error message can be set using the `error_msg` argument.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/assert_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Assert\",\n [\"A\"],\n [],\n error_msg=\"Failed assertion from Assert operator\"\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(3,3)).astype(np.int32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\ntry:\n workspace.RunOperatorOnce(op)\nexcept RuntimeError:\n print(\"Assertion Failed!\")\nelse:\n print(\"Assertion Passed!\")\n\n```\n\n**Result**\n\n```\n\nA:\n[[7 5 6]\n [1 2 4]\n [5 3 7]]\nAssertion Passed!\n\n```\n\n
\n\n ", + "inputs": [ + { + "description": "(*Tensor*): input tensor", + "name": "X" + } + ], + "support_level": "default" + } + }, + { + "name": "Reduce", + "schema": { + "attributes": [ + { + "description": "(int, default 0) the root to run reduce into.", + "name": "root", + "option": "optional" + } + ], + "description": "\nDoes a reduce operation from every node to the root node. Currently only\nSum is supported.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be reduced.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The reduced result on root, not set for other nodes.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "TimerGet", + "schema": { + "description": "\nQueries the current time of a timer object in nanoseconds.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_ops.cc\n\n ", + "inputs": [ + { + "description": "(*Tensor``*): pointer to a timer object; obtained from **TimerBegin** op", + "name": "timer" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): scalar containing time in nanoseconds", + "name": "nanos" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8GivenTensorFill", + "schema": { + "attributes": [ + { + "description": "Input array of type char(byte)", + "name": "values", + "option": "optional" + }, + { + "description": "Input tensor shape", + "name": "shape", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\n Creates quantized tensor of type char(byte) with scale and zero point info.\n", + "outputs": [ + { + "description": "An Int8TensorCPU with scale and zero point info", + "name": "Tensor" + } + ], + "support_level": "default" + } + }, + { + 
"name": "LengthsIndicesInGradientSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "DotProductWithPadding", + "schema": { + "attributes": [ + { + "description": "the padding value for tensors with smaller dimension", + "name": "pad_value", + "option": "optional" + }, + { + "description": "whether to replicate the smaller tensor or not", + "name": "replicate", + "option": "optional" + } + ], + "description": "\nGiven two input float tensors X, Y with different shapes and produces one\noutput float tensor of the dot product between X and Y. We currently support\ntwo kinds of strategies to achieve this. Before doing normal dot_product 1)\npad the smaller tensor (using pad_value) to the same shape as the other one.\n2) replicate the smaller tensor to the same shape as the other one. Note the\nfirst dimension of X, Y must be equal. Only the second dimension of X or Y\ncan be padded.\n", + "inputs": [ + { + "description": "1D or 2D input tensor", + "name": "X" + }, + { + "description": "1D or 2D input tensor", + "name": "Y" + } + ], + "outputs": [ + { + "description": "1D output tensor", + "name": "Z" + } + ], + "support_level": "default" + } + }, + { + "name": "MakeTwoClassGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "HardSigmoidGradient", + "schema": { + "description": "\nHardSigmoidGradient takes both Y and dY as well as an argument alpha and uses\nthis to update dX according to the chain rule and derivatives of the hard\nsigmoid function.\n", + "support_level": "default" + } + }, + { + "name": "IndexFreeze", + "schema": { + "description": "\nFreezes the given index, disallowing creation of new index entries.\nShould not be called concurrently with IndexGet.\n", + "inputs": [ + { + "description": "Pointer to an Index instance.", + "name": "handle" + } + ], + "outputs": [ + { + "description": "The input handle.", + "name": "handle" + } + ], + "support_level": 
"default" + } + }, + { + "name": "Scale", + "schema": { + "attributes": [ + { + "description": "(float, default 1.0) the scale to apply.", + "name": "scale", + "option": "optional" + } + ], + "description": "\nScale takes one input data (Tensor) and produces one output data\n(Tensor) whose value is the input data tensor scaled element-wise.\n", + "support_level": "default" + } + }, + { + "name": "APMeter", + "schema": { + "attributes": [ + { + "description": "(int32_t) indicates how many predictions should the op buffer. defaults to 1000", + "name": "buffer_size", + "option": "optional" + } + ], + "description": "\nAPMeter computes Average Precision for binary or multi-class classification.\nIt takes two inputs: prediction scores P of size (n_samples x n_classes), and\ntrue labels Y of size (n_samples x n_classes). It returns a single float number\nper class for the average precision of that class.\n", + "inputs": [ + { + "description": "2-D tensor (Tensor) of size (num_samples xnum_classes) containing prediction scores", + "name": "predictions" + }, + { + "description": "2-D tensor (Tensor) of size (num_samples) containing true labels for each sample", + "name": "labels" + } + ], + "outputs": [ + { + "description": "1-D tensor (Tensor) of size num_classes containing average precision for each class", + "name": "AP" + } + ], + "support_level": "default" + } + }, + { + "name": "Rowwise8BitQuantizedToFloat", + "schema": { + "description": "\nGiven uint8 tensor, quantized using 8bit row-wise\nquantization, and auxiliary scales and biases, this operator\nrestores float tensor in the following way. We take input 8bits tensor\nof size (m_1, m_2, ..., m_n), n >= 2, reshape it into matrix of size\n(m_1, m_2 x... x m_n). 
We compute element r_{ij} of output matrix as\nr_{ij} * s_i + b_i and after this we reshape this output matrix into\noutput tensor of size (m_1, m_2, ..., m_n).\n", + "inputs": [ + { + "description": "quantized_input", + "name": "quantized_input" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": null, + "name": null + }, + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Mod", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Divisor of the modulo operation (must be >= 1).", + "name": "divisor", + "option": "optional", + "type": "int64" + }, + { + "default": false, + "description": "If true, sign of output matches divisor, else if false, sign follows dividend.", + "name": "sign_follow_divisor", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nElement-wise modulo operation. Each element in the output is the modulo result\nof the corresponding element in the input data. The divisor of the modulo is\nprovided by the `divisor` argument.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/mod_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Mod\",\n [\"X\"],\n [\"Y\"],\n divisor=10\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(5,5))))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[56 22 43 13 60]\n [ 4 55 58 10 45]\n [64 66 4 3 66]\n [10 36 47 52 78]\n [91 4 36 47 95]]\nX after running op:\n[[6 2 3 3 0]\n [4 5 8 0 5]\n [4 6 4 3 6]\n [0 6 7 2 8]\n [1 4 6 7 5]]\n\n ```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor with int32 or int64 data.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor of data with modulo operation applied.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "StringPrefix", + "schema": { + "attributes": [ + { + "description": "Maximum size of the prefix, in bytes.", + "name": "length", + "option": "optional" + } + ], + "description": "\nComputes the element-wise string prefix of the string tensor.\nInput strings that are shorter than prefix length will be returned unchanged.\nNOTE: Prefix is computed on number of bytes, which may lead to wrong behavior\nand potentially invalid strings for variable-length encodings such as utf-8.\n", + "inputs": [ + { + "description": "Tensor of std::string.", + "name": "strings" + } + ], + "outputs": [ + { + "description": "Tensor of std::string containing prefixes for each input.", + "name": "prefixes" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSumFused8BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsSum, but operating on\n8-bit rowwise quantized matrices with fused storage (where each row\nstores quantized values, and then 4-byte scale and 4-byte bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused8BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "FileStoreHandlerCreate", + "schema": { + "attributes": [ + { + "description": "base path used by the FileStoreHandler", + 
"name": "path", + "option": "optional" + }, + { + "description": "prefix for all keys used by this store", + "name": "prefix", + "option": "optional" + } + ], + "description": "\nCreates a unique_ptr that uses the filesystem as backing\nstore (typically a filesystem shared between many nodes, such as NFS).\nThis store handler is not built to be fast. Its recommended use is for\nintegration tests and prototypes where extra dependencies are\ncumbersome. Use an ephemeral path to ensure multiple processes or runs\ndon't interfere.\n", + "outputs": [ + { + "description": "unique_ptr", + "name": "handler" + } + ], + "support_level": "default" + } + }, + { + "name": "AtomicFetchAdd", + "schema": { + "description": "\nGiven a mutex and two int32 scalar tensors, performs an atomic fetch add\nby mutating the first argument and adding it to the second input\nargument. Returns the updated integer and the value prior to the update.\n", + "inputs": [ + { + "description": "Blob containing to a unique_ptr", + "name": "mutex_ptr" + }, + { + "description": "Value to be mutated after the sum.", + "name": "mut_value" + }, + { + "description": "Value to add to the first operand.", + "name": "increment" + } + ], + "outputs": [ + { + "description": "Mutated value after sum. Usually same as input 1.", + "name": "mut_value" + }, + { + "description": "Value of the first operand before sum.", + "name": "fetched_value" + } + ], + "support_level": "default" + } + }, + { + "name": "Tanh", + "schema": { + "description": "\nCalculates the hyperbolic tangent of the given input tensor element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/tanh_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Tanh\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 2.032603 -2.3556721 -0.14955314]\n [ 0.39309832 -1.1020128 -0.92951244]\n [-0.62815386 0.21342885 1.4002231 ]]\n\nX:\n [[ 0.9662601 -0.982175 -0.14844811]\n [ 0.3740282 -0.8012209 -0.73036647]\n [-0.55677974 0.21024609 0.8853999 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "1-D input tensor", + "name": "input" + } + ], + "outputs": [ + { + "description": "The hyperbolic tangent values of the input tensor, computed element-wise", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Python", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "QuantDecodeGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "PythonGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "LengthsWeightedSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Squeeze", + "schema": { + "attributes": [ + { + "description": "List of dimensions of *data* to squeeze out.", + "name": "dims", + "option": "optional", + "type": "int64[]" + } + ], + "category": "Transform", + "description": "\nThe *Squeeze* op removes single-dimensional entries from the shape of the input tensor *data,* and produces a single output tensor *squeezed*. The op also takes an argument *dims* with a list of dimensions to squeeze. If the same blob is provided as input and output, the operation is copy-free. This is the exact inverse operation of *ExpandDims* given the same *dims* argument.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Squeeze\",\n [\"data\"],\n [\"squeezed\"],\n dims=[0,1],\n)\n\nworkspace.FeedBlob(\"data\", np.zeros((1,1,100,100)).astype(np.float32))\nprint(\"data.shape:\", workspace.FetchBlob(\"data\").shape)\n\nworkspace.RunOperatorOnce(op)\nprint(\"squeezed.shape:\", workspace.FetchBlob(\"squeezed\").shape)\n\n```\n\n**Result**\n\n```\n\ndata.shape: (1, 1, 100, 100)\nsqueezed.shape: (100, 100)\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "data" + } + ], + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "squeezed" + } + ], + "support_level": "default" + } + }, + { + "name": "NE", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise not equal to comparison **!=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"NE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False True True False False True]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceSum", + "schema": { + "attributes": [ + { + "description": "(*Tuple(int)*): list of axes to reduce", + "name": "axes", + "option": "optional" + }, + { + "description": "(*int*): set to 1 to keep the reduced dimension(s) (default=1), else set to 0 to not keep the reduced dimension(s)", + "name": "keepdims", + "option": "optional" + } + ], + "description": "\nComputes the **sum** of the input tensor's elements along the provided `axes`. The resulting tensor has the same rank as the input if the `keepdims` argument equals 1 (default). If `keepdims` is set to 0, then the `axes` dimensions are pruned.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceSum\",\n [\"X\"],\n [\"Y\"],\n axes=(0,1),\n keepdims=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,5,5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[5. 3. 7. 9. 5.]\n [4. 5. 1. 8. 3.]\n [1. 0. 9. 7. 6.]\n [7. 5. 0. 3. 1.]\n [6. 4. 4. 8. 3.]]\n\n [[8. 9. 6. 7. 7.]\n [5. 5. 4. 7. 0.]\n [9. 7. 6. 6. 7.]\n [7. 5. 2. 4. 2.]\n [4. 5. 1. 9. 4.]]]]\nY:\n[[13. 12. 13. 16. 12.]\n [ 9. 10. 5. 15. 3.]\n [10. 7. 15. 13. 13.]\n [14. 10. 2. 7. 3.]\n [10. 9. 5. 17. 7.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "PackSegments", + "schema": { + "attributes": [ + { + "description": "The pre-defined max_length for the packed segments", + "name": "max_length", + "option": "optional" + }, + { + "description": "Padding number in the packed segments. Use true to pad -infinity, otherwise pad zeros", + "name": "pad_minf", + "option": "optional" + }, + { + "description": "bool whether to return presence mask, false by default", + "name": "return_presence_mask", + "option": "optional" + } + ], + "description": "Map N dim tensor to N+1 dim based on length blob. Sequences that are shorter than the longest sequence are padded with zeros.", + "inputs": [ + { + "description": "1-d int/long tensor contains the length in each of the output.", + "name": "lengths" + }, + { + "description": "N dim Tensor.", + "name": "tensor" + } + ], + "outputs": [ + { + "description": "N + 1 dim Tensorwhere dim(1) is the max length, dim(0) is the batch size.", + "name": "packed_tensor" + }, + { + "description": "2 dim boolean tensor, false where packed_tensor is padded, true otherwise.", + "name": "presence_mask" + } + ], + "support_level": "default" + } + }, + { + "name": "UnPackRecords", + "schema": { + "attributes": [ + { + "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.", + "name": "fields", + "option": "optional" + } + ], + "description": "\nGiven a packed dataset (packed by the PackRecordsOp) and the `fields` argument\ndescribing the datasets schema, return the original dataset format. Number of\nreturned tensors is equal to the number of fields in the `fields` argument.\n\nThe first input is the packed tensor to be unpacked. 
Optionally, you can provide\nprototype tensors to give the expected shapes of the output tensors. This is\nhelpful when you expected to unpack empty tensor, e.g., output of a sampling\nprocess.\n", + "inputs": [ + { + "description": "The tensor to be unpacked", + "name": "packed_tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "Normalize", + "schema": { + "attributes": [ + { + "description": "axis to normalize", + "name": "axis", + "option": "optional" + } + ], + "description": "\nGiven a matrix, apply L2-normalization along the specified dimension.\n", + "support_level": "default" + } + }, + { + "name": "LengthsMax", + "schema": { + "description": "\nApplies 'Max' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'Max' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nMax computes the element-wise max of the input slices. Operation doesn't change the shape of the individual blocks.\n\n\nThe *LengthsMax* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the maximum value in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [max([2,4]), max([3,1,2]), max([10])] = [4,3,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsMax\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 4. 3. 10.]\n\n```\n\n
\n\n\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "L1DistanceGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MomentumSGD", + "schema": { + "description": "\n\nComputes a momentum SGD update for an input gradient and momentum\nparameters. Concretely, given inputs (grad, m, lr) and parameters\n(momentum, nesterov), computes:\n\n if not nesterov:\n adjusted_gradient = lr * grad + momentum * m\n return (adjusted_gradient, adjusted_gradient)\n else:\n m_new = momentum * m + lr * grad\n return ((1 + momentum) * m_new - momentum * m, m_new)\n\nOutput is (grad, momentum)\n\nNote the difference to MomemtumSGDUpdate, which actually performs the\nparameter update (and is thus faster).\n", + "support_level": "default" + } + }, + { + "name": "ReduceSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeSum", + "schema": { + "description": "\nApplies 'Sum' to each segment of input tensor. In order to allow for more\nefficient implementation of 'Sum', the input segments have to be contiguous\nand non-empty.\n\nSEGMENT_IDS is a vector that maps each of the first dimension slices of the\nDATA to a particular group (segment). Values belonging to the same segment are\naggregated together.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. 
Other dimensions are inherited from the input tensor.\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor to be aggregated", + "name": "DATA" + }, + { + "description": "Vector with the same length as the first dimension of DATA and values in the range 0..K-1 and in increasing order that maps each slice of DATA to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated tensor with the first dimension of K and the other dimentsions inherited from DATA", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "TT", + "schema": { + "attributes": [ + { + "description": "(int[]) Input sizes of cores. Indicates the input size of the individual cores; the size of the input vector X must match the product of the inp_sizes array.", + "name": "inp_sizes", + "option": "optional" + }, + { + "description": "(int[]) Output sizes of cores. Indicates the output size of the individual cores; the size of the output vector Y must match the product of the out_sizes array.", + "name": "out_sizes", + "option": "optional" + }, + { + "description": "(int[]) Ranks of cores. Indicates the ranks of the individual cores; lower rank means larger compression, faster computation but reduce accuracy.", + "name": "tt_ranks", + "option": "optional" + } + ], + "description": "\nThe TT-layer serves as a low-rank decomposition of a fully connected layer. The\ninputs are the same as to a fully connected layer, but the number of parameters\nare greatly reduced and forward computation time can be drastically reduced\nespecially for layers with large weight matrices. The multiplication is computed\nas a product of the input vector with each of the cores that make up the TT\nlayer. 
Given the input sizes (inp_sizes), output sizes(out_sizes), and the ranks\nof each of the cores (tt_ranks), the ith core will have size:\n\n inp_sizes[i] * tt_ranks[i] * tt_ranks[i + 1] * out_sizes[i].\n\nThe complexity of the computation is dictated by the sizes of inp_sizes,\nout_sizes, and tt_ranks, where there is the trade off between accuracy of the\nlow-rank decomposition and the speed of the computation.\n", + "inputs": [ + { + "description": "Input tensor from previous layer with size (M x K), where M is the batch size and K is the input size.", + "name": "X" + }, + { + "description": "1D blob containing the bias vector", + "name": "b" + }, + { + "description": "1D blob containing each individual cores with sizes specified above.", + "name": "cores" + } + ], + "outputs": [ + { + "description": "Output tensor from previous layer with size (M x N), where M is the batch size and N is the output size.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BitwiseXor", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Pass 1 to enable broadcasting.", + "name": "broadcast", + "option": "optional", + "type": "int64" + }, + { + "default": -1, + "description": "Axis to concatenate on. If set, defines the broadcast dimensions.", + "name": "axis", + "option": "optional", + "type": "int64" + } + ], + "description": "\nPerforms element-wise bitwise operation `bitwise_xor` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 
1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* First operand.", + "name": "A" + }, + { + "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "StoreSet", + "schema": { + "attributes": [ + { + "description": "alternative key for the blob (optional)", + "name": "blob_name", + "option": "optional" + } + ], + "description": "\nSet a blob in a store. The key is the input blob's name and the value\nis the data in that blob. 
The key can be overridden by specifying the\n'blob_name' argument.\n", + "inputs": [ + { + "description": "unique_ptr", + "name": "handler" + }, + { + "description": "data blob", + "name": "data" + } + ], + "support_level": "default" + } + }, + { + "name": "CTCGreedyDecoder", + "schema": { + "attributes": [ + { + "description": "When merge_repeated is true, merge repeated classes in output.", + "name": "merge_repeated", + "option": "optional" + } + ], + "description": "Greedy decoder for connectionist temporal classification.", + "inputs": [ + { + "description": "3D float Tensor sized [max_time, batch_size, num_classes]", + "name": "INPUTS" + }, + { + "description": "(optional) 1D int vector containing sequence lengths, having size [batch_size]seq_len will be set to max_time if not provided", + "name": "SEQ_LEN" + } + ], + "outputs": [ + { + "description": "Output_len matrix size (batch). The row store: [decoded_length]", + "name": "OUTPUT_LEN" + }, + { + "description": "Values vector, size (total_decoded_outputs). The vector stores the decoded classes", + "name": "VALUES" + } + ], + "support_level": "default" + } + }, + { + "name": "SegmentOneHot", + "schema": { + "description": "\nGiven a sequence of indices, segmented by the lengths tensor, returns a matrix\nthat has the elements in each sequence set to 1.0, and 0.0 everywhere else.\n", + "inputs": [ + { + "description": "Size of each segment.", + "name": "lengths" + }, + { + "description": "Active indices, of size sum(lengths)", + "name": "indices" + }, + { + "description": "Size of the index", + "name": "index_size_tensor" + } + ], + "outputs": [ + { + "description": "Matrix of size len(lengths) x index_size", + "name": "one_hots" + } + ], + "support_level": "default" + } + }, + { + "name": "Max", + "schema": { + "description": "\nElement-wise max of an arbitrary number of input tensors. This operation can be\nperformed in-place, by using the first input blob as the output blob. 
All inputs\nmust have the same shape and data type, and the output will have the same shape\nas the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/minmax_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Max\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Max:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.4496477 0.07061381 0.7139333 ]\n [0.83203 0.05970785 0.72786295]\n [0.75988126 0.04601283 0.32820013]]\nY:\n[[0.05683139 0.16872478 0.671098 ]\n [0.70739156 0.09878621 0.03416285]\n [0.34087983 0.94986707 0.67263436]]\nZ:\n[[0.48051122 0.07141234 0.85264146]\n [0.77086854 0.22082241 0.13154659]\n [0.42401117 0.995431 0.4263775 ]]\nMax:\n[[0.48051122 0.16872478 0.85264146]\n [0.83203 0.22082241 0.72786295]\n [0.75988126 0.995431 0.67263436]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors with the same shape.", + "name": "X, Y, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as input(s).Contains the maximum valued element at each location.", + "name": "M" + } + ], + "support_level": "default" + } + }, + { + "name": "MaxPool3DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CreateDB", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Rsqrt", + "schema": { + "description": "Computes the element-wise rsqrt of the input.", + "inputs": [ + { + "description": "ND input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "ND output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "WeightedSum", + "schema": { + "description": "\nElement-wise weighted sum of several data, weight tensor pairs.\nInput should be in the form X_0, weight_0, X_1, weight_1, ... where X_i all\nhave the same shape, and weight_i are size 1 tensors that specifies the weight\nof each vector. Note that if one wants to do in-place computation, it could\nonly be done with X_0 also as the output, but not other X_i.\n", + "inputs": [ + { + "description": "Weight of the first input in the sum.", + "name": "weight_0" + } + ], + "outputs": [ + { + "description": "Result containing weighted elem-wise sum of inputs.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "NCHW2NHWC", + "schema": { + "description": "\nThe operator switches the order of data in a tensor from NCHW- sample index N,\nchannels C, height H and width W, to the NHWC order (this is for 2D images).\nIn general, this operator switches the order of data in a tensor from N C H_1\n... H_k to N H_1 ... 
H_k C for k-dimensional features, and currently supports\nk=1, 2, and 3.\n", + "inputs": [ + { + "description": "The input data (Tensor) in the NCHW order.", + "name": "data" + } + ], + "outputs": [ + { + "description": "The output tensor (Tensor) in the NHWC order.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "GivenTensorByteStringToUInt8Fill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": "\nThis op fills a uint8 output tensor with the data specified by the *value* argument. The data must previously be serialized as a byte string. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\nThis op allows us to write uint8 tensors to Protobuf as byte strings and read them back as uint8 tensors in order to avoid the Protobuf uint32_t varint encoding size penalty.\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nval = np.array([1, 2, 3], dtype=np.uint8)\nop = core.CreateOperator(\n \"GivenTensorByteStringToUInt8Fill\",\n [],\n [\"out\"],\n values=[val.tobytes()],\n shape=val.shape,\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [1 2 3]\n\n```\n\n
\n\n", + "support_level": "default" + } + }, + { + "name": "LC3DGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "rnn_internal_accumulate_gradient_input", + "schema": { + "description": "\nInternal RNN operator.\n", + "support_level": "default" + } + }, + { + "name": "SumInt", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "DotProductGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "MergeMultiMapFeatureTensors", + "schema": { + "description": "Merge given multi-feature tensors with map features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n", + "inputs": [ + { + "description": ".lengths", + "name": "in1_lengths" + }, + { + "description": ".keys", + "name": "in1_keys" + }, + { + "description": ".values.lengths", + "name": "in1_values_lengths" + }, + { + "description": ".values.keys", + "name": "in1_values_keys" + }, + { + "description": ".values.values", + "name": "in1_values_values" + } + ], + "outputs": [ + { + "description": ".lengths", + "name": "out_lengths" + }, + { + "description": ".keys", + "name": "out_keys" + }, + { + "description": ".values_lengths", + "name": "out_values_lengths" + }, + { + "description": ".values.keys", + "name": "out_values_keys" + }, + 
{ + "description": ".values.values", + "name": "out_values_values" + } + ], + "support_level": "default" + } + }, + { + "name": "SortedSegmentRangeLogSumExpGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "CosineSimilarityGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SoftmaxWithLossGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "EnforceFinite", + "schema": { + "description": "\nRaise if there is NaN or Inf values in the input tensor.\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + } + ], + "support_level": "default" + } + }, + { + "name": "AveragePool3D", + "schema": { + "description": "AveragePool3D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceFrontSum", + "schema": { + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "description": "\nReduces the input tensor along the last dimension of the by applying **sum**.\n\nCan reduce more than one of the \"first\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the sum operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_1 * d_2 * ... * d_{n})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{0}$ dimension.\n\nFor example, if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1,2]$, then $Y = [sum(1,4), sum(5,1,7), sum(2), sum(9,2)] = [2.5, 4.333, 2, 5.5]$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_sum_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceFrontSum\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[4. 1. 1.]\n [0. 6. 7.]\n [7. 8. 6.]]\n\n [[5. 7. 7.]\n [0. 1. 6.]\n [2. 9. 0.]]]\nY: [18. 32. 27.]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ConditionalSetAtomicBool", + "schema": { + "description": "\nSet an atomic to true if the given condition bool variable is true\n ", + "inputs": [ + { + "description": "Blob containing a unique_ptr>", + "name": "atomic_bool" + }, + { + "description": "Blob containing a bool", + "name": "condition" + } + ], + "support_level": "default" + } + }, + { + "name": "CollectTensor", + "schema": { + "attributes": [ + { + "description": "The max number of tensors to collect", + "name": "num_to_collect", + "option": "optional" + } + ], + "description": "\nCollect tensor into tensor vector by reservoir sampling,\nargument num_to_collect indicates the max number of tensors that will be\ncollected. The first half of the inputs are tensor vectors, which are also the\noutputs. The second half of the inputs are the tensors to be collected into each\nvector (in the same order). The input tensors are collected in all-or-none\nmanner. If they are collected, they will be placed at the same index in the\noutput vectors.\n", + "support_level": "default" + } + }, + { + "name": "Min", + "schema": { + "description": "\nElement-wise min of an arbitrary number of input tensors. This operation can be performed in-place, by using the first input blob as the output blob. All inputs must have the same shape and data type, and the output will have the same shape as the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/minmax_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Min\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(2,2)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(2,2)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(2,2)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Min:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.32731926 0.4939747 ]\n [0.29242373 0.43460014]]\nY:\n[[0.40928316 0.916115 ]\n [0.77526504 0.29339448]]\nZ:\n[[0.7899794 0.90335774]\n [0.82599413 0.2843068 ]]\nMin:\n[[0.32731926 0.4939747 ]\n [0.29242373 0.2843068 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors with the same shape.", + "name": "X, Y, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor with same dimensions as input(s).Contains the minimum valued element at each location.", + "name": "M" + } + ], + "support_level": "default" + } + }, + { + "name": "Ceil", + "schema": { + "description": "\nElement-wise application of the ceil function ($y=ceil(x)$) to the input tensor\n`X`. Output tensor shape is the same as the input tensor.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/ceil_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Ceil\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.uniform(-10, 10, (5,5))).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[ 8.44598 -6.5098248 -2.2993476 -7.6859694 0.58566964]\n [-7.846551 -0.03689406 6.9362907 -4.0521703 4.4969673 ]\n [ 0.33355865 -7.895527 -8.393201 9.374202 -2.3930092 ]\n [-6.3061996 3.1403487 3.782099 -8.516556 -2.8387244 ]\n [-2.0164998 4.7663913 -3.422966 0.3636999 8.75713 ]]\nX after running op:\n[[ 9. -6. -2. -7. 1.]\n [-7. -0. 7. -4. 5.]\n [ 1. -7. -8. 10. -2.]\n [-6. 4. 4. -8. -2.]\n [-2. 5. -3. 1. 9.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "RecurrentNetworkBlobFetcher", + "schema": { + "attributes": [ + { + "description": "Prefix string to prepend extracted blobs.", + "name": "prefix", + "option": "optional" + } + ], + "description": "\nRetrieves blobs from scratch workspaces (which contain intermediate recurrent\nnetwork computation for each timestep) and puts them in the global\nworkspace under CPUContext.\n", + "inputs": [ + { + "description": "Name of scratch workspace blob returned by recurrent network.", + "name": "ScratchWorkspaceBlob" + } + ], + "outputs": [ + { + "description": "1D tensor of strings containing extracted blob names.", + "name": "blob_names" + } + ], + "support_level": "default" + } + }, + { + "name": "Selu", + "schema": { + "attributes": [ + { + "default": 1.673263, + "description": "Alpha constant in equation.", + "name": "alpha", + "option": "optional", + "type": "float32" + }, + { + "default": 1.0507, + "description": "Scale constant in equation.", + "name": "scale", + "option": "optional", + "type": "float32" + } + ], + "description": "\n\nThe *Selu* op takes one input tensor $X$, an argument $alpha$, an argument $scale$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element wise *Selu* operation, defined as\n\n$$y=selu(x) =\\begin{cases}scale (\\alpha e^{x} - \\alpha) & x < 0\\\\scale * x & otherwise\\end{cases}$$\n\nThe default value of *alpha* is 1.6732632423543772848170429916717 and the default value of *scale* is 1.0507009873554804934193349852946. 
See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) for more information.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/selu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/selu_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Selu\",\n [\"X\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 1.1613879 -0.27111396 -1.2076733 ]\n [ 1.3442237 -1.0701777 1.2070968 ]\n [ 0.23810555 0.9740916 -1.7872391 ]]\n\nY:\n [[ 1.2202715 -0.4174965 -1.2326177 ]\n [ 1.4123772 -1.1551634 1.2682979 ]\n [ 0.25017774 1.023479 -1.4637551 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "Input tensor of data to be operated on.", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor with same shape as input.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "DataCouple", + "schema": { + "description": "\n\nA one to one operator that takes an arbitrary number of input and output blobs\nsuch that each input blob is inplace with it's matching output blob. It then proceedes\nto do nothing with each of these operators. This serves two purposes. It can make it\nappear as if a blob has been written to, as well as can tie together different blobs\nin a data dependency\n\n", + "support_level": "default" + } + }, + { + "name": "Logit", + "schema": { + "attributes": [ + { + "description": "small positive epsilon value, the default is 1e-6.", + "name": "eps (optional)", + "option": "optional" + } + ], + "description": "\nElementwise logit transform: logit(x) = log(x / (1 - x)), where x is the\ninput data clampped in (eps, 1-eps).\n", + "inputs": [ + { + "description": "input float tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "output float tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "SegmentIdsToLengths", + "schema": { + "description": "\nTransfers a vector of segment ids to a vector of segment lengths. This operation\nsupports non-consecutive segment ids. Segments not appearing in the input vector\nwill have length 0. If the second input is provided, the number of segments =\nthe size of its first dimension. 
Otherwise, the number of segments = the last\nindex in the first input vector + 1.\n\nIn general, for consecutive, zero-based segment IDs, this is the inverse\noperation of LengthsToSegmentIds, except that a vector of segment IDs\ncannot represent empty segments at the end (if the second input is absent).\n", + "inputs": [ + { + "description": "1-D int32_t or int64_t tensor of segment ids", + "name": "segment_ids" + }, + { + "description": "if provided, number of segments = the size of its first dimension", + "name": "data (optional)" + } + ], + "outputs": [ + { + "description": "1-D int64_t tensor of segment lengths", + "name": "lengths" + } + ], + "support_level": "default" + } + }, + { + "name": "YellowFin", + "schema": { + "attributes": [ + { + "description": "Default 0.999", + "name": "beta", + "option": "optional" + }, + { + "description": "Default 20", + "name": "curv_win_width", + "option": "optional" + }, + { + "description": "Default 1e-6", + "name": "epsilon", + "option": "optional" + }, + { + "description": "Default false", + "name": "nesterov", + "option": "optional" + }, + { + "description": "Default true", + "name": "zero_debias", + "option": "optional" + } + ], + "description": "\n\nComputes the YellowFin update (https://arxiv.org/abs/1706.03471) and performs\nmomentum SGD optimization step. lr and mu are not being shared between\nparameters. curv_win, g_avg, g2_avg and scalars_memory are just auxiliary\nmemory for computing moving averages (see the publication). 
Takes arguments\nbeta: coefficient for moving averages,\ncurv_win_width: timeframe when average squared gradient is being stored,\nepsilon: for numerical purposes,\nnesterov and zero_debias for debias of moving average.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Momentum", + "name": "moment" + }, + { + "description": "Learning rate", + "name": "lr" + }, + { + "description": "Momentum coefficient", + "name": "mu" + }, + { + "description": "Memory for latest curvature ranges", + "name": "curv_win" + }, + { + "description": "Moving average of gradient", + "name": "g_avg" + }, + { + "description": "Moving average of squared gradient", + "name": "g2_avg" + }, + { + "description": "Memory for stateful scalars", + "name": "scalars_memory" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "Iteration number", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Parameters to be updated", + "name": "output_param" + }, + { + "description": "Momentum", + "name": "output_moment" + }, + { + "description": "Output learning rate", + "name": "output_lr" + }, + { + "description": "Output momentum coefficient", + "name": "output_mu" + }, + { + "description": "Output memory for latest curvature ranges", + "name": "output_curv_win" + }, + { + "description": "Output moving average of gradient", + "name": "output_g_avg" + }, + { + "description": "Output moving average of squared gradient", + "name": "output_g2_avg" + }, + { + "description": "Output memory for stateful scalars", + "name": "output_scalars_memory" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceBackMax", + "schema": { + "attributes": [ + { + "description": "(*int*): number of dimensions to reduce (default=1)", + "name": "num_reduce_dims", + "option": "optional" + } + ], + "description": "\nReduces the input tensor along the last dimension of the by applying **max**.\n\nCan reduce more 
than one of the \"last\" dimensions by setting `num_reduce_dim`.\n\nA second (optional) input, `lengths`, can be passed, which enforces that only a subset of the elements are considered in the max operation.\n- If input tensor `X` has shape $(d_0, d_1, d_2, ..., d_n)$, `lengths` must have shape $(d_0 * d_1 * d_2 * ... * d_{n-1})$.\n- The values of the `lengths` tensor determine how many of the values to consider for each vector in the $d_{n-1}$ dimension.\n\nFor example if $X = [[1,5,2,9],[4,1,8,2],[2,7,0,3]]$ and $lengths = [2,3,1]$, then $Y = [max(1,5), max(4,1,8), max(2)] = [5, 8, 2]$\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduce_front_back_max_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ReduceBackMax\",\n [\"X\"],\n [\"Y\"],\n num_reduce_dim=2\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(1,2,3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[[[2. 5. 1.]\n [6. 1. 9.]\n [8. 5. 9.]]\n\n [[5. 7. 8.]\n [9. 9. 6.]\n [6. 5. 0.]]]]\nY: [[9. 9.]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(*Tensor``*): input tensor", + "name": "X" + }, + { + "description": "(*Tensor``*): number of elements in each sample", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "(*Tensor``*): reduced tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Concat", + "schema": { + "attributes": [ + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + }, + { + "description": "Which axis to concat on", + "name": "axis", + "option": "optional" + }, + { + "description": "Pass 1 to add the axis specified in arg 'axis' to all input tensors", + "name": "add_axis", + "option": "optional" + } + ], + "description": "Concatenate a list of tensors into a single tensor", + "outputs": [ + { + "description": "Concatenated tensor", + "name": "concat_result" + }, + { + "description": "The dimensions of the inputs.", + "name": "split_info" + } + ], + "support_level": "default" + } + }, + { + "name": "Allgather", + "schema": { + "description": "\nDoes an allgather operation among the nodes.\n", + "inputs": [ + { + "description": "The common world.", + "name": "comm_world" + }, + { + "description": "A tensor to be allgathered.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The allgathered tensor, same on all nodes.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "RecurrentNetworkGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "HalfFloatToFused8BitRowwiseQuantized", + "schema": { + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. 
To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 8 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.)\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "StopGradient", + "schema": { + "description": "\nStopGradient is a helper operator that does no actual numerical computation,\nand in the gradient computation phase stops the gradient from being computed\nthrough it.\n", + "support_level": "default" + } + }, + { + "name": "Summarize", + "schema": { + "attributes": [ + { + "description": "(int, default 0) flag to indicate if the summarized statistics have to be written to a log file.", + "name": "to_file", + "option": "optional" + } + ], + "description": "\nSummarize computes four statistics of the input tensor (Tensor)- min,\nmax, mean and standard deviation. The output will be written to a 1-D tensor of\nsize 4 if an output tensor is provided. 
Else, if the argument 'to_file' is\ngreater than 0, the values are written to a log file in the root folder.\n", + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data" + } + ], + "outputs": [ + { + "description": "1-D tensor (Tensor) of size 4 containing min, max, mean and standard deviation", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "NormalizeL1", + "schema": { + "attributes": [ + { + "description": "axis to normalize", + "name": "axis", + "option": "optional" + } + ], + "description": "\nGiven a matrix, apply L1-normalization along the specified axis.\n", + "support_level": "default" + } + }, + { + "name": "BatchSparseToDense", + "schema": { + "attributes": [ + { + "description": "Optional, output dense last dimension. If both this argument and output_shape_inference are set, it should be consistent with output_shape_inference's last dim", + "name": "dense_last_dim", + "option": "optional" + }, + { + "description": "Optional, missing values are filled with this value.default_value = 0 when not set", + "name": "default_value", + "option": "optional" + } + ], + "description": "\nConvert sparse matrix representation into dense matrix.\n\nA sparse matrix is represented by `lengths` vector, `indices` vector,\nand `values` vector. 
Each element in `lengths` vector (lengths[`i`]) represents\nthe number of indices in this batch (batch `i`).\nWith in each batch, `indices` should not have duplicate number.\n\nFor example, with input:\n\n lengths = [2, 3, 1]\n indices = [0, 1, 2, 3, 4, 5]\n values = [6, 7, 8, 9, 10, 11]\n dense_dim = 6\n default_value = 0\n\nThe output is:\n\n output = [[6, 7, 0, 0, 0, 0],\n [0, 0, 8, 9, 10, 0],\n [0, 0, 0, 0, 0, 11]]\n\nafter running this operator.\n", + "inputs": [ + { + "description": "Flatten tensor, used to break down indices and values into per batch indices and values.", + "name": "lengths" + }, + { + "description": "Flatten tensor of total size = \\sum lengths, containing the indices ", + "name": "indices" + }, + { + "description": "Data tensor, dimension has to match `indices`", + "name": "values" + }, + { + "description": "Optional, a dense tensor whose shape define the output shape", + "name": "output_shape_inference" + } + ], + "outputs": [ + { + "description": "2-D dense tensor, with 1st dim = len(lengths), 2nd dim = dense_last_dimin the arg list, the tensor is of the same data type as `values`.Missing values are filled with default_value", + "name": "dense" + } + ], + "support_level": "default" + } + }, + { + "name": "MaxPool3D", + "schema": { + "description": "MaxPool3D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. 
Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Output data tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "CreateMap", + "schema": { + "attributes": [ + { + "description": "Key's TensorProto::DataType (default INT32)", + "name": "key_dtype", + "option": "optional" + }, + { + "description": "Value's TensorProto::DataType (default INT32)", + "name": "value_dtype", + "option": "optional" + } + ], + "description": "Create an empty map blob", + "outputs": [ + { + "description": "Blob reference to the map", + "name": "map blob" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchMomentsGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "WeightedSigmoidCrossEntropyWithLogitsGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "RmsProp", + "schema": { + "description": "\nComputes the RMSProp update\n(http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf).\nConcretely, given inputs (grad, mean_squares, mom, lr), computes:\n\n mean_squares_o = mean_squares + (1 - decay) * (square(grad) - mean_squares)\n mom_o = momentum * mom + lr * grad / sqrt(epsilon + mean_squares_o)\n grad_o = mom_o\n\nReturns (grad_o, mean_squares_o, mom_o).\n", + "support_level": "default" + } + }, + { + "name": "SoftmaxWithLoss", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Setting to 1 enables inputting labels as probability distribution.", + "name": "label_prob", + "option": "optional", + "type": "int64" + }, + { + "default": 1, + "description": "Axis of the inputs when coerced to 2D.", + "name": "axis", + "option": "optional", + "type": "int64" + }, + { + "description": "Average loss output scaling factor (must be >= 0).", + "name": "scale", + "option": "optional", + "type": 
"float32" + }, + { + "default": "'NCHW'", + "description": "Order of blob dimensions (only 'NCHW' is supported currently).", + "name": "order", + "option": "optional", + "type": "string" + } + ], + "description": "\nCombined Softmax and Cross-Entropy loss operator. The operator first computes the softmax normalized values for each layer in the batch of the given input, then computes cross-entropy loss. This operator is numerically more stable than separate `Softmax` and `CrossEntropy` ops. The inputs are a 2-D tensor `logits` of size (batch_size x input_feature_dimensions), which represents the unscaled log probabilities, and a 1-dimensional integer `labels` tensor for ground truth. An optional third input blob (`weight_tensor`) can be used to weight the samples for the loss, which is useful if the training set is unbalanced. This operator outputs a `softmax` tensor which contains the probability for each label for each example (same shape is `logits` input), and a scalar `loss` value, which is the averaged cross-entropy loss between the softmax probabilities and the ground truth values. Use parameter `label_prob`=1 to enable inputting labels as a probability distribution.\n\nSoftmax cross-entropy loss function:\n\n$$loss(x, class) = -\\log{\\biggl(\\frac{\\exp(x[class])}{\\sum_{j} \\exp(x[j])}\\biggr)} = -x[class] + \\log{\\biggl(\\sum_{j} \\exp(x[j])\\biggr)}$$\n\nor if the `weight_tensor` has been passed:\n\n$$loss(x, class) = weight[class]\\biggl(-x[class] + \\log{\\biggl(\\sum_{j} \\exp(x[j])\\biggr)}\\biggr)$$\n\nThe `logits` input does not need to explicitly be a 2D vector; rather, it will be coerced into one. For an arbitrary n-dimensional tensor `X` in $[a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}]$, where k is the `axis` provided, then `X` will be coerced into a 2-dimensional tensor with dimensions $[(a_0 * ... * a_{k-1}), (a_k * ... * a_{n-1})]$. 
For the default case where `axis`=1, the `X` tensor will be coerced into a 2D tensor of dimensions $[a_0, (a_1 * ... * a_{n-1})]$, where $a_0$ is often the batch size. In this situation, we must have $a_0 = N$ and $a_1 * ... * a_{n-1} = D$. Each of these dimensions must be matched correctly, or else the operator will throw errors.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/softmax_with_loss_op.cc\n\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SoftmaxWithLoss\",\n [\"logits\", \"labels\"],\n [\"softmax\", \"avgloss\"]\n)\n\nworkspace.FeedBlob(\"logits\", np.random.randn(1, 5).astype(np.float32))\nworkspace.FeedBlob(\"labels\", np.asarray([4]).astype(np.int32))\nprint(\"logits:\", workspace.FetchBlob(\"logits\"))\nprint(\"labels:\", workspace.FetchBlob(\"labels\"))\nworkspace.RunOperatorOnce(op)\nprint(\"softmax:\", workspace.FetchBlob(\"softmax\"))\nprint(\"avgloss:\", workspace.FetchBlob(\"avgloss\"))\n\n```\n\n**Result**\n\n```\n\nlogits: [[-0.3429451 -0.80375195 0.23104447 1.4569176 -0.5268362 ]]\nlabels: [4]\nsoftmax: [[0.09721052 0.0613179 0.17258129 0.58800864 0.0808817 ]]\navgloss: 2.5147676\n\n```\n\n
\n\n
\n\n Example 2 \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"SoftmaxWithLoss\",\n [\"logits\", \"labels\"],\n [\"softmax\", \"avgloss\"],\n scale=5.0\n)\n\nworkspace.FeedBlob(\"logits\", np.asarray([[.1, .4, .7, 1.5, .2]]).astype(np.float32))\nworkspace.FeedBlob(\"labels\", np.asarray([4]).astype(np.int32))\nprint(\"logits:\", workspace.FetchBlob(\"logits\"))\nprint(\"labels:\", workspace.FetchBlob(\"labels\"))\nworkspace.RunOperatorOnce(op)\nprint(\"softmax:\", workspace.FetchBlob(\"softmax\"))\nprint(\"avgloss:\", workspace.FetchBlob(\"avgloss\"))\n\n```\n\n**Result**\n\n```\n\nlogits: [[0.1 0.4 0.7 1.5 0.2]]\nlabels: [4]\nsoftmax: [[0.10715417 0.144643 0.19524762 0.4345316 0.11842369]]\navgloss: 10.667433\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor``)* Input tensor.", + "name": "logits" + }, + { + "description": "*(type: Tensor``)* Ground truth label tensor.", + "name": "labels" + }, + { + "description": "*(type: Tensor``)* [OPTIONAL] Blob used to weight the samples for the loss.", + "name": "weight_tensor" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Softmax output tensor.", + "name": "softmax" + }, + { + "description": "*(type: float)* Averaged cross-entropy loss output.", + "name": "loss" + } + ], + "support_level": "default" + } + }, + { + "name": "VariableLengthSequencePadding", + "schema": { + "description": "\nSuper special-case operator. Used to pad a tensor to mimic pytorch's\npad_packed_sequence.\n\nGiven an input tensor INPUT of size NxBxM and an input tensor LENS\nof size B, where\n\nN = maximum sequence length\nB = batch size\nM = hidden size\n\nset each element of INPUT to zero if it is is past the end of the\ncorresponding sequence (i.e. if LENS[j] > i for an index (i,j,k)).\n\n", + "support_level": "default" + } + }, + { + "name": "SparseLengthsMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SoftplusGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseLengthsMean", + "schema": { + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Mean' to each segment. Segments are defined by their LENGTHS.\n\nThis op is basically Gather and LengthsMean fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nLENGTHS is a vector that defines slice sizes by first dimension of DATA. Values\nbelonging to the same segment are aggregated together. sum(LENGTHS) has\nto match INDICES size.\n\nThe first dimension of the output is equal to the number of input segment,\ni.e. 
`len(LENGTHS)`. Other dimensions are inherited from the input tensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Non negative vector with sum of elements equal to INDICES length", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseUnsortedSegmentWeightedSum", + "schema": { + "attributes": [ + { + "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators", + "name": "grad_on_weights", + "option": "optional" + } + ], + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'WeightedSum' to each segment. Segments ids can appear in arbitrary order (unlike in\nSparseSortedSegmentWeightedSum).\n\nThis op is basically Gather and UnsortedSegmentWeightedSum fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nIf `num_segments` argument is passed it would be used as a first dimension for\nthe output. Otherwise, it'd be dynamically calculated from as the max value of\nSEGMENT_IDS plus one. 
Other output dimensions are inherited from the input\ntensor.\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor for the summation", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Integer vector with the same length as INDICES that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. Has the first dimension of equal to the number of segments.", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "EnsureDense", + "schema": { + "description": "\nThis operator converts dense or sparse gradients to dense ones.\nTherefore, sparse gradient can be back propagated to Operators that consume\ndense gradients only (e.g., FCGradient).\n\nThe operator's behaviors:\n\n- In forward, simply pass in place or copy input to the output.\n- In backward, if the gradient passed-in is sparse gradient, change it to dense gradient in linear time; otherwise, simply pass the dense gradient.\n", + "inputs": [ + { + "description": "Input tensors.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor. Same dimension as inputs.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "LabelCrossEntropy", + "schema": { + "description": "\nThis operator computes the cross entropy between a $NxD$ dimensional input data tensor $X$ and a one dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. 
Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows\n\n$$Y_i = -log(X_{ij})$$\n\nwhere ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nThe difference between *LabelCrossEntropy* and *CrossEntropy* is how the labels are specified. Here, the labels are a length $N$ list of integers, whereas in CrossEntropy the labels are a $NxD$ dimensional matrix of one hot label vectors. However, the results of computation should be the same, as shown in the two examples where ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LabelCrossEntropy\",\n [\"X\", \"label\"],\n [\"Y\"]\n)\n\n// Create X: Sample softmax output for 5-class model\nX = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]])\nprint(\"X:\\n\",X)\n\n// Create label: Sample 1-hot ground truth label vectors\nlabel = np.array([4,2])\nprint(\"label:\\n\",label)\n\n// Feed X & label into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"label\", label.astype(np.int32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[0.01 0.05 0.02 0.02 0.9 ]\n [0.03 0.1 0.42 0.05 0.4 ]]\nlabel:\n [4 2]\nY:\n [0.10536055 0.8675006 ]\n\n```\n\n
\n\n\n", + "inputs": [ + { + "description": "Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes.", + "name": "X" + }, + { + "description": "Blob containing the labels used to compare the input. $label$ is a length $N$ list of integers, where each element is the integer label for the $n$th element of the batch.", + "name": "label" + } + ], + "outputs": [ + { + "description": "Output blob from the cross entropy computation. $Y$ is 1D length $N$ tensor.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BooleanMaskGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "Save", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.", + "name": "absolute_path", + "option": "optional", + "type": "int64" + }, + { + "default": "", + "description": "Characters in the provided blob names that match `strip_prefix` will be removed prior to saving. Also, characters that precede `strip_prefix` will be removed. Useful for removing device scope from blob names.", + "name": "strip_prefix", + "option": "optional", + "type": "string" + }, + { + "description": "If set, used as blob names instead of original blob names. Must be same length as number of blobs.", + "name": "blob_name_overrides", + "option": "optional", + "type": "string[]" + }, + { + "description": "The output path of the db. 
See the `absolute_path` arg details for options regarding the current root folder of the workspace.", + "name": "db", + "option": "optional", + "type": "string" + }, + { + "description": "Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").", + "name": "db_type", + "option": "optional", + "type": "string" + }, + { + "default": "kDefaultChunkSize", + "description": "The chunk size to split tensor data into. If not set, caffe2_tensor_chunk_size will be used", + "name": "chunk_size", + "option": "optional", + "type": "string" + } + ], + "description": "\nSaves a set of blobs to a db. It takes $[1, \\infty)$ number of inputs and has\nno output. The contents of the inputs are written into the db using the\nsettings specified by the arguments.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Save\",\n [\"X\", \"Y\", \"Z\"],\n [],\n db=\"test_db2\",\n db_type=\"leveldb\",\n blob_name_overrides=[\"x_scores\", \"y_scores\", \"z_scores\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(20, size=(5,5)))\nworkspace.FeedBlob(\"Y\", np.random.randint(20, size=(5,5)))\nworkspace.FeedBlob(\"Z\", np.random.randint(20, size=(5,5)))\nworkspace.RunOperatorOnce(op)\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor(s).", + "name": "X" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseSortedSegmentMean", + "schema": { + "description": "\nPulls in slices of the input tensor, groups them into segments and applies\n'Mean' to each segment. Segments need to be sorted and contiguous. See also\nSparseUnsortedSegmentMean that doesn't have this requirement.\n\nThis op is basically Gather and SortedSegmentMean fused together.\n\nINDICES should contain integers in range 0..N-1 where N is the first dimension\nof DATA. INDICES represent which slices of DATA need to be pulled in.\n\nSEGMENT_IDS is a vector that maps each referenced slice of the DATA to a\nparticular group (segment). Values belonging to the same segment are aggregated\ntogether. SEGMENT_IDS should have the same dimension as INDICES.\n\nThe first dimension of the output is equal to the number of input segments,\ni.e. `SEGMENT_IDS[-1]+1`. Other dimensions are inherited from the input tensor.\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n ", + "inputs": [ + { + "description": "Input tensor, slices of which are aggregated.", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same length as INDICES and values in the range 0..K-1 and in increasing order that maps each slice of DATA referenced by INDICES to one of the segments", + "name": "SEGMENT_IDS" + } + ], + "outputs": [ + { + "description": "Aggregated output tensor. 
Has the first dimension of K (the number of segments).", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "GaussianFill", + "schema": { + "attributes": [ + { + "default": 0.0, + "description": "Mean of the distribution to draw from.", + "name": "mean", + "option": "optional", + "type": "float32" + }, + { + "default": 1.0, + "description": "Standard deviation of the distribution to draw from.", + "name": "std", + "option": "optional", + "type": "float32" + }, + { + "description": "Desired shape of the *output* tensor.", + "name": "shape", + "option": "optional", + "type": "int64[]" + }, + { + "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.", + "name": "extra_shape", + "option": "optional", + "type": "int64[]" + }, + { + "default": false, + "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.", + "name": "input_as_shape", + "option": "optional", + "type": "boolean" + } + ], + "description": "\nThis op fills an output tensor with samples drawn from a normal distribution specified by the mean and standard deviation arguments. The output tensor shape is specified by the *shape* argument. However, if *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\n*Note: cannot set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GaussianFill\",\n [],\n [\"out\"],\n shape=[3,3],\n mean=2.0,\n std=1.1\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [[1.2084167 2.3336504 2.827349 ]\n [2.7108908 0.9374752 1.7173369 ]\n [0.03320992 2.1775863 1.0894578 ]]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*", + "name": "input" + } + ], + "outputs": [ + { + "description": "Output tensor of random values drawn from a normal distribution. If the shape argument is set, this is the shape specified, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "ReduceMinGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "GivenTensorInt16Fill", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "values" + }, + { + "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.", + "name": "shape", + "option": "optional" + }, + { + "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.", + "name": "extra_shape", + "option": "optional" + }, + { + "description": "1D tensor containing the desired output shape. First input must be in CPU context.", + "name": "input_as_shape", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "Abs", + "schema": { + "description": "\nCalculates the absolute value of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/abs_op.cc\n\n
\n\n Example \n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Abs\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [ 0.3005476 1.551666 -1.3591481 0.39191285 -0.21866608]\nY: [0.3005476 1.551666 1.3591481 0.39191285 0.21866608]\n\n```\n\n
\n\n", + "inputs": [ + { + "description": "*(type: Tensor)* Input tensor.", + "name": "X" + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Absolute value of input element-wise.", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsTopK", + "schema": { + "attributes": [ + { + "description": "the number of top values to return for each segment, if the number of values is smaller than k, the values would be padded with 0 and indices would be padded with -1.", + "name": "k", + "option": "optional" + } + ], + "description": "\nApply TopK to each segment of the input tensor, where segments are defined by\ntheir LENGTHS, and concatenate them in an output tensor of\nshape=(SIZE(LENGTHs), k). In case there's less than k values in a segment,\nthe output value will be padded by 0, and the corresponding output indices will\nbe padded by -1.\n", + "inputs": [ + { + "description": "Tensor of rank 1. First dimension must be equal to the sum of lengths", + "name": "DATA" + }, + { + "description": "Tensor of int32 lengths of rank 1", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "Output top k elements for each segment, withshape=(SIZE(lengths), k)", + "name": "TopKValue" + }, + { + "description": "Output indices in DATA corresponding to value in TopKValue", + "name": "TopKIndices" + } + ], + "support_level": "default" + } + }, + { + "name": "RMACRegions", + "schema": { + "attributes": [ + { + "description": "Number of scales to sample regions at.", + "name": "scales", + "option": "optional" + }, + { + "description": "Overlap between consecutive regions.", + "name": "overlap", + "option": "optional" + } + ], + "description": "\nComputes a fixed-grid of RMAC region coordinates at various levels\nas described in https://arxiv.org/abs/1511.05879.\n", + "inputs": [ + { + "description": "The input 4D tensor of shape NCHW.", + "name": "X" + } + ], + "outputs": [ + { + "description": "The output RMAC regions for all items in 
the batch. Tensor of shape (N x 5) following the ROIPoolOp format - each row is of the format (batch_index x1 y1 x2 y2) where x1, y1, x2, y2 are the region co-ordinates. Each region is repeated N times corresponding to each item in the batch.", + "name": "RMAC_REGIONS" + } + ], + "support_level": "default" + } + }, + { + "name": "SumReduceLike", + "schema": { + "attributes": [ + { + "description": "If set, defines the starting dimension for reduction. Args `axis` and `axis_str` cannot be used simultaneously.", + "name": "axis", + "option": "optional" + }, + { + "description": "If set, it could only be N or C or H or W. `order` arg should also be provided. It defines the reduction dimensions on NCHW or NHWC. Args `axis` and `axis_str` cannot be used simultaneously.", + "name": "axis_str", + "option": "optional" + }, + { + "description": "Either NHWC or HCWH", + "name": "order", + "option": "optional" + } + ], + "description": "\nSumReduceLike operator takes 2 tensors as input. It performs reduce sum to the\nfirst input so that the output looks like the second one.\nIt assumes that the first input\nhas more dimensions than the second, and the dimensions of the second input is\nthe contiguous subset of the dimensions of the first.\nFor example, the following tensor shapes are supported:\n\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 2, 5), shape(B) = (2), with axis=0\n ", + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B" + } + ], + "outputs": [ + { + "description": "Result, has same dimensions and type as B", + "name": "C" + } + ], + "support_level": "default" + } + }, + { + "name": "MaxGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ReplaceNaN", + "schema": { + "attributes": [ + { + "description": "the value to replace NaN, the default is 0", + "name": "value (optional)", + "option": "optional" + } + ], + "description": "\nReplace the NaN (not a number) element in the input tensor with argument `value`\n", + "inputs": [ + { + "description": "Input tensor", + "name": "input" + }, + { + "description": "Output tensor", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "HasScope", + "schema": { + "description": "\nChecks whether scope blob has any saved scopes left\n ", + "support_level": "default" + } + }, + { + "name": "ReduceBackSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ZeroGradient", + "schema": { + "description": "\nZeroGradient operators doesn't produce any output blobs. One can use\nthis operator to produce 0 gradient for the input blob.\n", + "support_level": "default" + } + }, + { + "name": "ReduceBackMeanGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "ClipGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSum8BitsRowwise", + "schema": { + "description": "\nVariation of SparseLengthsWeightedSum operator, where\nDATA is stored using 8bits. DATA was quantized with 8Bit row-wise\nquantization (see doc to FloatToRowwiseQuantized8Bits operator). 
To\nrestore DATA from 8Bit, we use additional input that stores scales\nand biases.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToRowwiseQuantized8Bits", + "name": "DATA" + }, + { + "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the length of INDICES", + "name": "SCALARS" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i -- scale and bias for i-th row", + "name": "scale_bias" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "HuffmanTreeHierarchy", + "schema": { + "attributes": [ + { + "description": "The number of classes used to build the hierarchy.", + "name": "num_classes", + "option": "optional" + } + ], + "description": "\nHuffmanTreeHierarchy is an operator to generate huffman tree hierarchy given\nthe input labels. 
It returns the tree as serialized HierarchyProto\n", + "inputs": [ + { + "description": "The labels vector", + "name": "Labels" + } + ], + "outputs": [ + { + "description": "Huffman coding hierarchy of the labels", + "name": "Hierarch" + } + ], + "support_level": "default" + } + }, + { + "name": "ChannelShuffle", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "AveragedLossGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "SparseLengthsIndicesInGradientSumGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "BooleanUnmask", + "schema": { + "description": "\nGiven a series of masks and values, reconstruct values together according to masks. A comprehensive example:\n```\nmask1 = True, False, True, False, False\nvalues1 = 1.0, 3.0\nmask2 = False, True, False, False, False\nvalues2 = 2.0\nmask3 = False, False, False, True, True\nvalues3 = 4.0, 5.0\n```\n\nReconstruct by:\n\n```\noutput = net.BooleanUnmask([mask1, values1, mask2, values2, mask3, values3], [\"output\"])\noutput = 1.0, 2.0, 3.0, 4.0, 5.0\n```\n\nNote that for all mask positions, there must be at least one True. This is not allowed:\n\n```\nmask1 = True, False\nvalues1 = 1.0\nmask2 = False, False\nvalues2 =\n\noutput = net.BooleanUnmask([mask1, values1, mask2, values2], [\"output\"])\n```\n\nIf there are multiple True values for a field, we accept the first value, and no longer expect a value for that location:\n\n```\nmask1 = True, False\nvalues1 = 1.0\nmask2 = True, True\nvalues2 = 2.0\n\noutput = net.BooleanUnmask([mask1, values1, mask2, values2], [\"output\"])\noutput = 1.0, 2.0\n```\n\n*** Note that we alternate `data` and `mask` inputs\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_unmask_ops.cc\n\n
\n\n Example \n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanUnmask\",\n [\"mask1\", \"data1\", \"mask2\", \"data2\"],\n [\"unmasked_data\"]\n)\n\nworkspace.FeedBlob(\"mask1\", np.array([True,False,False,True,True,False]))\nworkspace.FeedBlob(\"data1\", np.array([1,4,5]))\nworkspace.FeedBlob(\"mask2\", np.array([False,True,True,False,False,True]))\nworkspace.FeedBlob(\"data2\", np.array([2,3,6]))\n\nprint(\"data1:\", workspace.FetchBlob(\"data1\"))\nprint(\"mask1:\", workspace.FetchBlob(\"mask1\"))\nprint(\"data2:\", workspace.FetchBlob(\"data2\"))\nprint(\"mask2:\", workspace.FetchBlob(\"mask2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"unmasked_data:\", workspace.FetchBlob(\"unmasked_data\"))\n\n```\n\n**Result**\n\n```\n\ndata1: [1 4 5]\nmask1: [ True False False True True False]\ndata2: [2 3 6]\nmask2: [False True True False False True]\nunmasked_data: [1 2 3 4 5 6]\n\n```\n\n
\n", + "inputs": [ + { + "description": "(*Tensor*): 1D input tensor(s)", + "name": "data" + }, + { + "description": "(*Tensor``*): 1D boolean mask tensor(s)", + "name": "mask" + } + ], + "outputs": [ + { + "description": "(*Tensor*): 1D tensor of same type as `data` input that contains the unmasked input tensor", + "name": "unmasked_data" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused8BitRowwiseQuantizedToHalfFloat", + "schema": { + "description": "\nDe-quantizes the result of the\nHalfFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 32-bit float in the second to the last 4 bytes of each\nrow, followed by the bias as a 32-bit float in the next 4 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float16 data", + "name": "float16_output" + } + ], + "support_level": "default" + } + }, + { + "name": "LengthsGather", + "schema": { + "description": "\nGather items from sparse tensor. Sparse tensor is described by items and\nlengths. This operator gathers items corresponding to lengths at the given\nindices. This deliberately doesn't return lengths of OUTPUTS so that both lists\nand maps can be supported without special cases. 
If you need lengths tensor for\n OUTPUT, use `Gather`.\n\nExample:\n ITEMS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n LENGTHS = [0, 2, 3, 1, 4]\n INDICES = [0, 2, 4]\n\n OUTPUT = [2, 3, 4, 6, 7, 8, 9]\n", + "inputs": [ + { + "description": "items tensor", + "name": "ITEMS" + }, + { + "description": "lengths tensor", + "name": "LENGTHS" + }, + { + "description": "indices into LENGTHS where items should be gathered", + "name": "INDICES" + } + ], + "outputs": [ + { + "description": "1-D tensor containing gathered items", + "name": "OUTPUT" + } + ], + "support_level": "default" + } + }, + { + "name": "SequenceMask", + "schema": { + "attributes": [ + { + "description": "(string) Mode selection. Possible values: 'sequence', 'upper', 'lower', 'upperdiag', 'lowerdiag'", + "name": "mode", + "option": "optional" + }, + { + "description": "(int) Beginning axis of row elements. All dimensions to the left will be treated as row indices and those to the right (inclusive) will be treated as column indices in the 2D mask", + "name": "axis", + "option": "optional" + }, + { + "description": "(bool) operate in gradient mode", + "name": "grad", + "option": "optional" + }, + { + "description": "(int) radius of windows in window mode", + "name": "radius", + "option": "optional" + }, + { + "description": "(int) batch dimension of tensor (optional)", + "name": "batch", + "option": "optional" + }, + { + "description": "(int) used when mask should be repeated for one or more data dimensions (beginning at this axis). (currently only supported for sequence mode without batch argument)", + "name": "repeat_from_axis", + "option": "optional" + } + ], + "description": "\nMask op designed for use in attention mechanisms for sequence modeling tasks.\nSupports batching: given batch_dim, collapses dims 0 through batch_dim into a\nsingle dimension, e.g. 
if tensor dims are [4,2,1,3,4] and batch_dim=2, first\ncollapse tensor to [4*2*1,3,4], then mask each batch [i,:,:].\n\n\nTwo current operating modes:\n\n\n1) Given a 2D input tensor and 1D tensor of sequence lengths, for each row i in\nthe input tensor, set elements in that row to -inf if their column index\nj >= sequence_lengths[i]. This mode takes two inputs and argument mode =\n'sequence'\n\n\n2) Triangular mask. Given row index i and column index j, set elements to -inf\ngiven the following conditions:\n\n mode='upper', x_ij = -inf if j < i\n mode='lower', x_ij = -inf if j > i\n mode='upperdiag', x_ij = -inf if j <= i\n mode='lowerdiag', x_ij = -inf if j >= i\n\nThis mode takes one input.\n\n\n3) Window Mask. Given a 2D input tensor and 1D tensor of window centers,\nfor each row i in the input tensor, set elements in that row to -inf\nif their column index j outside [center - radius, center + radius].\nThis mode takes two inputs and argument mode = 'sequence'.\nArgument 'radius' should be provided.\n", + "inputs": [ + { + "description": "Tensor to apply masking to", + "name": "input" + }, + { + "description": "1D Tensor of sequence lengths for mode #1", + "name": "sequence_lengths" + } + ], + "outputs": [ + { + "description": "Input tensor with masking applied", + "name": "masked_tensor" + } + ], + "support_level": "default" + } + }, + { + "name": "Int8Transpose", + "schema": { + "attributes": [ + { + "description": "Order to permute axes of input tensor. Reverses the dimensions by default.", + "name": "axes", + "option": "optional" + }, + { + "description": "Output tensor quantization scale", + "name": "Y_scale", + "option": "optional" + }, + { + "description": "Output tensor quantization offset", + "name": "Y_zero_point", + "option": "optional" + } + ], + "description": "\nTranspose the input tensor by permuting the axes of the input according\nto the `axes` argument. 
Similar to numpy's\n[transpose](https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html)\nfunction.\n\nFor example, when axes=(1, 0, 2), given an input tensor of shape\n(1, 2, 3), the output shape will be (2, 1, 3).\n", + "inputs": [ + { + "description": "Input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "Transposed output", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "ResizeNearest3DGradient", + "schema": { + "attributes": [ + { + "description": "Scale along temporal dimension", + "name": "temporal_scale", + "option": "optional" + }, + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "description": null, + "support_level": "default" + } + }, + { + "name": "ResizeNearest3D", + "schema": { + "attributes": [ + { + "description": "Scale along temporal dimension", + "name": "temporal_scale", + "option": "optional" + }, + { + "description": "Scale along width dimension", + "name": "width_scale", + "option": "optional" + }, + { + "description": "Scale along height dimension", + "name": "height_scale", + "option": "optional" + } + ], + "description": "\nResizes the spatial dimensions of the input tensor using nearest neighbor\ninterpolation. 
The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(output_height * height_scale)\nAssumptions:\n - Only resize height and width\n - Both width_scale and height_scale scale are 2\n", + "inputs": [ + { + "description": "Input tensor", + "name": "X" + } + ], + "outputs": [ + { + "description": "Output tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchPermutation", + "schema": { + "description": "\nBatch permutation of an input tensor X given input indices. First dimension of\nX equals batch size N. The indices stores a be permutation of N.\nThe output Y is a tensor of same shape as X, with data re-ordered according to\nthe indices within the batch size.\n\nExample of batch permutation on a 2-D tensor with batch size 4:\n X = [\n [1, 5, 2, 3, 4, 6, 0],\n [4, 3, 3, 5, 2, 3, 1],\n [2, 2, 3, 6, 0, 0, 1],\n [0, 0, 1, 1, 2, 2, 3]\n ]\n indices = [2, 0, 1, 3]\n Y = [\n [2, 2, 3, 6, 0, 0, 1],\n [1, 5, 2, 3, 4, 6, 0],\n [4, 3, 3, 5, 2, 3, 1],\n [0, 0, 1, 1, 2, 2, 3]\n ]\n\nExample of batch permutation on a 3-D tensor with batch size 4:\n X = [\n [[1, 5, 2], [3, 4, 6, 0]],\n [[4, 3, 3], [5, 2, 3, 1]],\n [[2, 2, 3], [6, 0, 0, 1]],\n [[0, 0, 1], [1, 2, 2, 3]]\n ]\n indices = [2, 0, 1, 3]\n Y = [\n [[2, 2, 3], [6, 0, 0, 1]],\n [[1, 5, 2], [3, 4, 6, 0]],\n [[4, 3, 3], [5, 2, 3, 1]],\n [[0, 0, 1], [1, 2, 2, 3]]\n ]\n", + "inputs": [ + { + "description": "Input tensor, where 1st dimension equals batch size", + "name": "X" + }, + { + "description": "Input indices of batch to permute", + "name": "indices" + } + ], + "outputs": [ + { + "description": "Output permuted tensor", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "BatchPermutationGradient", + "schema": { + "description": null, + "support_level": "default" + } + }, + { + "name": "AliasWithName", + "schema": { + "attributes": [ + { + 
"description": "name of the aliasing", + "name": "name", + "option": "optional" + }, + { + "description": "weather or not to alias forward or backward", + "name": "is_backward", + "option": "optional" + } + ], + "description": "\nSimilar with AliasOp, storing the alias name as operator argument.\n", + "inputs": [ + { + "description": "Input tensor whose storage will be shared.", + "name": "input" + } + ], + "outputs": [ + { + "description": "Tensor of same shape as input, sharing its storage.", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "RowWiseCounter", + "schema": { + "attributes": [ + { + "description": "Default -1: off", + "name": "counter_halflife", + "option": "optional" + } + ], + "description": "\n Count the number recent update on rows. Exponential decay is\n applied on the counter with decay rate r, such that\n r^{counter_halflife} = 0.5; If counter_halflife is nonpositive,\n this operator is turned off.\n", + "inputs": [ + { + "description": "Iter at last update", + "name": "prev_iter" + }, + { + "description": "update counter", + "name": "update_counter" + }, + { + "description": "Sparse indices", + "name": "indices" + }, + { + "description": "current iteration", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated iter at last update", + "name": "output_prev_iter" + }, + { + "description": "Output update counter", + "name": "output_update_counter" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused8BitRowwiseQuantizedHalfScaleBiasToHalfFloat", + "schema": { + "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 16-bit float in the second to the last 2 bytes of each\nrow, followed by the bias as a 16-bit float in the next 2 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. 
De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToFused8BitRowwiseQuantizedHalfScaleBias", + "schema": { + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 4 bytes\nof each row in the output matrix are a half float storing the scale\nfollowed by another half float containing the scale.)\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "HalfFloatToFused8BitRowwiseQuantizedHalfScaleBias", + "schema": { + "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. 
More precisely, each row contains\nint8 elements for each quantized element, and the last 4 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.)\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused8BitRowwiseQuantizedHalfScaleBiasToFloat", + "schema": { + "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 16-bit float in the second to the last 2 bytes of each\nrow, followed by the bias as a 16-bit float in the next 2 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + } + }, + { + "name": "AtomicFetchAdd64", + "schema": { + "description": "\nLike, AtomicFetchAdd but with int64_t scalar tensors,\nperforms an atomic fetch add\nby mutating the first argument and adding it to the second input\nargument. 
Returns the updated integer and the value prior to the update.\n", + "inputs": [ + { + "description": "Blob containing to a unique_ptr", + "name": "mutex_ptr" + }, + { + "description": "Value to be mutated after the sum.", + "name": "mut_value" + }, + { + "description": "Value to add to the first operand.", + "name": "increment" + } + ], + "outputs": [ + { + "description": "Mutated value after sum. Usually same as input 1.", + "name": "mut_value" + }, + { + "description": "Value of the first operand before sum.", + "name": "fetched_value" + } + ], + "support_level": "default" + } + }, + { + "name": "Quantile", + "schema": { + "attributes": [ + { + "description": "If true (default), apply abs() on the tensor values.", + "name": "abs", + "option": "optional" + }, + { + "description": "multiplicative tolerance of the quantile_value.", + "name": "tol", + "option": "optional" + } + ], + "description": "\n Calculate the quantile for the value in the given list of tensors.\n", + "inputs": [ + { + "description": "*(type: Tensor``)* List of input tensors.", + "name": "X1, X2, ..." 
+ } + ], + "outputs": [ + { + "description": "Value at the given quantile", + "name": "quantile_value" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSumFused2BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsSum, but operating on\n2-bit rowwise quantized matrices with fused storage (where each row\nstores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToFused4BitRowwiseQuantized", + "schema": { + "description": "\nApplies 4-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 4-bit number between 0 and\n15. To later de-quantize values, the scale (range / 15) and zero_point\nare stored alongside the data. 
More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSum2BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsWeightedSum, but operating on 2-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\n2-byte fp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToFused2BitRowwiseQuantized", + "schema": { + "description": "\nApplies 2-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 2-bit number between 0 and\n3. To later de-quantize values, the scale (range / 3) and zero_point\nare stored alongside the data. 
More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSum4BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsWeightedSum, but operating on 4-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\n2-byte fp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsMean4BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsMean, but operating on 4-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + 
"description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsMean2BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsMean, but operating on 2-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSumFused2BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsWeightedSum,\nbut operating on 2-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": 
"LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused4BitRowwiseQuantizedToFloat", + "schema": { + "description": "\nDe-quantizes the result of the\nFloatToFused4BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsMean8BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsMean, but operating on 8-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 4-byte\nfp32 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": 
"HalfToFused4BitRowwiseQuantized", + "schema": { + "description": "\nApplies 4-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 4-bit number between 0 and\n15. To later de-quantize values, the scale (range / 15) and zero_point\nare stored alongside the data. More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSumFused4BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsSum, but operating on\n4-bit rowwise quantized matrices with fused storage (where each row\nstores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused4BitRowwiseQuantizedToHalf", + "schema": { + "description": "\nDe-quantizes the result of the\nFloatToFused4BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. 
The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float16 data", + "name": "float16_output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsMeanFused4BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsMean, but\noperating on 4-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "HalfToFused2BitFakeRowwiseQuantized", + "schema": { + "description": "\nApplies 2-bit row-wise fake quantization to a tensor of half floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToFused2BitFakeRowwiseQuantized", + "schema": { + "description": "\nApplies 2-bit row-wise fake quantization to a tensor of floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + 
"description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "FloatToFused4BitFakeRowwiseQuantized", + "schema": { + "description": "\nApplies 4-bit row-wise fake quantization to a tensor of floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float32 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused2BitRowwiseQuantizedToFloat", + "schema": { + "description": "\nDe-quantizes the result of the\nFloatToFused2BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. 
The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float32 data", + "name": "float_output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSum4BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsSum, but operating on 4-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and 2-byte fp16 bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "HalfToFused2BitRowwiseQuantized", + "schema": { + "description": "\nApplies 2-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 2-bit number between 0 and\n3. To later de-quantize values, the scale (range / 3) and zero_point\nare stored alongside the data. 
More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsMeanFused2BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsMean, but\noperating on 2-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSumFused4BitRowwise", + "schema": { + "description": "\nPerforms the same operation as SparseLengthsWeightedSum,\nbut operating on 4-bit rowwise quantized matrices with fused storage\n(where each row stores quantized values, and then 2-byte fp16 scale and bias).\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": 
"output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSum8BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsSum, but operating on 8-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 4-byte\nfp32 scale and 4-byte fp32 bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSum2BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsSum, but operating on 2-bit rowwise quantized matrices\nwith fused storage (where each row stores quantized values, and then 2-byte\nfp16 scale and 2-byte fp16 bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused2BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + 
"name": "HalfToFused4BitFakeRowwiseQuantized", + "schema": { + "description": "\nApplies 4-bit row-wise fake quantization to a tensor of half floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n", + "inputs": [ + { + "description": "Float16 input data", + "name": "input" + } + ], + "outputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsWeightedSum8BitRowwiseSparse", + "schema": { + "description": "\nPerforms SparseLengthsWeightedSum, but operating on 8-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\n4-byte fp32 scale and bias), and where rows are pruned.\n", + "inputs": [ + { + "description": "uint8 tensor obtained with operator FloatToFused4BitRowwiseQuantized", + "name": "DATA" + }, + { + "description": "Integer vector containing indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of DATA", + "name": "LENGTHS" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction", + "name": "WEIGHTS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + } + ], + "outputs": [ + { + "description": "output", + "name": "output" + } + ], + "support_level": "default" + } + }, + { + "name": "Fused2BitRowwiseQuantizedToHalf", + "schema": { + "description": "\nDe-quantizes the result of the\nFloatToFused2BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. 
The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n", + "inputs": [ + { + "description": "Fused scale, bias and quantized data", + "name": "scale_bias_quantized_input" + } + ], + "outputs": [ + { + "description": "Float16 data", + "name": "float16_output" + } + ], + "support_level": "default" + } + }, + { + "name": "WeightScale", + "schema": { + "attributes": [ + { + "description": "Every iteration number to do weight scaling", + "name": "stepsize", + "option": "optional" + }, + { + "description": "After iter passes this bound, do not perform the weight rescaling", + "name": "upper_bound_iter", + "option": "optional" + }, + { + "description": "The multiplicative factor applied to weights.", + "name": "scale", + "option": "optional" + } + ], + "description": "\nEvery `stepsize` iterations, multiply the weights by a constant `scale`:\n nw = w * scale\n", + "inputs": [ + { + "description": "Current weights", + "name": "w" + }, + { + "description": "Training Iteration", + "name": "iter" + } + ], + "outputs": [ + { + "description": "Updated weights", + "name": "nw" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseAdagradFusedWithSparseLengthsSumGradient", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientSumGradient (gradient of SparseLengthsSum) +\nSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. 
Additional input (lengths) is for fused\nSparseLengthsIndicesInGradientSumGradient operator.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nApproximately fused operator of\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + SparseAdagrad, where weights are\npositional weights computed with LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere's race condition w.r.t. 
ordering between reading params and writing to\nparam, hence the name Approx.\nThere're auxiliary inputs (aux_param) for which gradient is computed and\nreturns (aux_grad).\nYet additional input (lengths) is for fused\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient operator.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradients", + "name": "aux_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradient", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nFused operator of SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + RowWiseSparseAdagrad, where weights are\npositional weights computed with LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere're auxiliary inputs (aux_param) for which gradient is computed and\nreturns (aux_grad).\nYet additional input (lengths) is for fused 
SparseLengthsWeightedSumGradient\noperator.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradient", + "name": "aux_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsSumGradient", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nFused operator of\nSparseLengthsIndicesInGradientSumGradient (gradient of SparseLengthsSum) +\nRowWiseSparseAdagrad.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case. 
Additional input (lengths) is for fused\nSparseLengthsSumGradient operator.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseAdagradFusedWithSparseLengthsWeightedSumGradient", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nFused operator of SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + SparseAdagrad, where weights are\npositional weights computed with LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the sparse AdaGrad\nupdate on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere're auxiliary inputs (aux_param) for which gradient is computed\nand returns (aux_grad).\nYet additional input (lengths) is for fused\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient operator.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the 
slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradient", + "name": "aux_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox", + "schema": { + "attributes": [ + { + "description": "Default 1e-5", + "name": "epsilon", + "option": "optional" + } + ], + "description": "\n\nApproximately fused operator of\nSparseLengthsIndicesInGradientWeightedSumWithMainInputGradient\n(gradient of SparseLengthsWeightedSum) + RowWiseSparseAdagrad, where weights are\npositional weights computed with LengthsRangeFill + Gather pattern.\n\nGiven inputs (param, moment, indices, grad, lr), runs the row-wise sparse\nAdaGrad update on (param, grad, moment[indices], lr), and returns (new_param,\nnew_moment) as in the dense case.\nThere's race condition w.r.t. 
ordering between reading params and writing to\nparam, hence the name Approx.\nThere're auxiliary inputs (aux_param) for which gradient is computed\nand returns (aux_grad).\nYet additional input (lengths) is for fused SparseLengthsWeightedSumGradient\noperator.\n\n", + "inputs": [ + { + "description": "Parameters to be updated", + "name": "param" + }, + { + "description": "Moment history", + "name": "moment" + }, + { + "description": "Auxiliary parameters to be updated", + "name": "aux_param" + }, + { + "description": "Integer vector containing indices of the first dimension of param for the slices that are being updated", + "name": "indices" + }, + { + "description": "Gradient computed", + "name": "grad" + }, + { + "description": "learning rate", + "name": "lr" + }, + { + "description": "Non negative vector with sum of elements equal to indices length", + "name": "lengths" + } + ], + "outputs": [ + { + "description": "Updated parameters", + "name": "output_param" + }, + { + "description": "Updated moment", + "name": "output_moment" + }, + { + "description": "Auxiliary gradient", + "name": "aux_grad" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseLengthsSumSparseLookup", + "schema": { + "description": "\nThis op converts compressed indices of SparseLengthsSum*Sparse to\nuncompressed indices of SparseLengthsSum*. For compressed indices that maps\nto -1. 
It means it will correspond to a zero row in the uncompressed data.\nTherefore we will remove this indices and adjust the lengths.\n", + "inputs": [ + { + "description": "Integer vector containing compressed indices of the first dimension of DATA for the slices that are being aggregated", + "name": "INDICES" + }, + { + "description": "Vector with the same sum of elements as the first dimension of INDICES", + "name": "LENGTHS" + }, + { + "description": "Integer vector mapping uncompressed indices to compressed indices", + "name": "COMPRESSED_INDICES_MAPPING" + }, + { + "description": "Vector of weights to scale rows of DATA with before reduction. Same size as INDICES.", + "name": "WEIGHTS" + } + ], + "outputs": [ + { + "description": "Uncompressed indices", + "name": "output_indices" + }, + { + "description": "Adjusted lengths", + "name": "output_lengths" + }, + { + "description": "Adjusted weights", + "name": "output_weights" + } + ], + "support_level": "default" + } + }, + { + "name": "Storm", + "schema": { + "attributes": [ + { + "description": "Momentum hyperparameter, c in the original paper.", + "name": "momentum", + "option": "optional" + }, + { + "description": "denominator in adaptive learning rate, w in the original paper.", + "name": "beta", + "option": "optional" + } + ], + "description": "\n\nComputes the STORM (https://arxiv.org/abs/1905.10018) update for an input\ngradient and accumulated history of gradients. Concretely, given inputs\n(param, moment, grad_sq_sum, grad, lr), computes:\n\n new_grad_sq_sum = grad_sq_sum + norm(grad)^2\n effective_lr = lr / (beta + new_grad_sq_sum)^1/3\n alpha = momentum * square(effective_lr)\n new_moment = grad + (1 - alpha) * (moment - grad)\n new_param = param + effective_lr * new_moment\n\nand returns (new_param, new_moment, new_grad_sq_sum).\n\nNote that due to caffe2 limitation, it is difficult to re-calculate gradient\nin the previous iteration using the current example. 
We simplied calculation\nfor new_moment by using the gradient from the current iteration.\n\n", + "inputs": [ + { + "description": "Parameters to be updated.", + "name": "param" + }, + { + "description": "Moment history.", + "name": "moment" + }, + { + "description": "Sum of observed squared gradients.", + "name": "grad_sq_sum" + }, + { + "description": "Gradients computed.", + "name": "grad" + }, + { + "description": "Learning rate, k in the original paper.", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters.", + "name": "output_param" + }, + { + "description": "Updated moment.", + "name": "output_moment" + }, + { + "description": "Updated sum of squared gradients.", + "name": "output_grad_sq_sum" + } + ], + "support_level": "default" + } + }, + { + "name": "SparseStorm", + "schema": { + "attributes": [ + { + "description": "Momentum hyperparameter, c in the original paper.", + "name": "momentum", + "option": "optional" + }, + { + "description": "denominator in adaptive learning rate, w in the original paper.", + "name": "beta", + "option": "optional" + } + ], + "description": "\n\nThis operator implement the STORM (https://arxiv.org/abs/1905.10018)\noptimization algorithm. 
Given inputs (param, moment, grad_sq_sum, grad,\nindices, lr), computes the dense STORM update on (param, moment[indices],\ngrad_sq_sum, grad, lr), and returns (new_param, new_moment, new_grad_sq_sum)\nas in the dense case.\n", + "inputs": [ + { + "description": "Parameters to be updated.", + "name": "param" + }, + { + "description": "Moment history.", + "name": "moment" + }, + { + "description": "Sum of observed squared gradients.", + "name": "grad_sq_sum" + }, + { + "description": "Gradients computed.", + "name": "grad" + }, + { + "description": "Sparse indices.", + "name": "indices" + }, + { + "description": "Learning rate, k in the original paper.", + "name": "lr" + } + ], + "outputs": [ + { + "description": "Updated parameters.", + "name": "output_param" + }, + { + "description": "Updated moment.", + "name": "output_moment" + }, + { + "description": "Updated sum of squared gradients.", + "name": "output_grad_sq_sum" + } + ], + "support_level": "default" + } + }, + { + "name": "FbGemmPackTranspose", + "schema": { + "description": "Prepack weight for fbgemm", + "inputs": [ + { + "description": "col major format weight matrix", + "name": "X" + } + ], + "outputs": [ + { + "description": "Block col major packed format weight matrix", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "FbGemmPack", + "schema": { + "description": "Prepack weight for fbgemm", + "inputs": [ + { + "description": "row major format weight matrix", + "name": "X" + } + ], + "outputs": [ + { + "description": "Block row major packed format weight matrix", + "name": "Y" + } + ], + "support_level": "default" + } + }, + { + "name": "FbFCPacked", + "schema": { + "description": "Same as FC,\n but the weight is prepacked as a fbgemm::PackedGemmMatrixFP16", + "support_level": "default" + } + } +] diff --git a/frontend/packages/core/public/netron/caffe2-proto.js b/frontend/packages/core/public/netron/caffe2-proto.js new file mode 100644 index 00000000..d2c18a4d --- /dev/null 
+++ b/frontend/packages/core/public/netron/caffe2-proto.js @@ -0,0 +1,2253 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.caffe2 || ($protobuf.roots.caffe2 = {}); + + $root.caffe2 = (function() { + + var caffe2 = {}; + + caffe2.ExternalDataProto = (function() { + + function ExternalDataProto(properties) { + this.strides = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ExternalDataProto.prototype.source_type = 0; + ExternalDataProto.prototype.record_id = ""; + ExternalDataProto.prototype.record_size = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + ExternalDataProto.prototype.offset = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ExternalDataProto.prototype.strides = $util.emptyArray; + + ExternalDataProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.ExternalDataProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.source_type = reader.int32(); + break; + case 2: + message.record_id = reader.string(); + break; + case 5: + message.record_size = reader.uint64(); + break; + case 3: + message.offset = reader.int64(); + break; + case 4: + if (!(message.strides && message.strides.length)) + message.strides = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.strides.push(reader.int64()); + } else + message.strides.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ExternalDataProto.decodeText = function decodeText(reader) { + var message = new $root.caffe2.ExternalDataProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "source_type": + message.source_type = reader.enum($root.caffe2.ExternalDataProto.SourceType); + break; + case "record_id": + message.record_id = reader.string(); + break; + case "record_size": + message.record_size = reader.uint64(); + break; + case "offset": + message.offset = reader.int64(); + break; + case "strides": + if (!(message.strides && message.strides.length)) + message.strides = []; + if (reader.first()) + while (!reader.last()) { + message.strides.push(reader.int64()); + reader.next(); + } + else + message.strides.push(reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + ExternalDataProto.SourceType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "INLINE_CONTAINER"] = 0; + values[valuesById[1] = "SIMPLE_FILE"] = 1; + return values; + })(); + + return ExternalDataProto; + })(); + + caffe2.TensorProto = (function() { + + function TensorProto(properties) { + this.dims = []; + this.float_data = []; + 
this.int32_data = []; + this.string_data = []; + this.double_data = []; + this.int64_data = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorProto.prototype.dims = $util.emptyArray; + TensorProto.prototype.data_type = 1; + TensorProto.prototype.storage_type = 1; + TensorProto.prototype.float_data = $util.emptyArray; + TensorProto.prototype.int32_data = $util.emptyArray; + TensorProto.prototype.byte_data = $util.newBuffer([]); + TensorProto.prototype.string_data = $util.emptyArray; + TensorProto.prototype.double_data = $util.emptyArray; + TensorProto.prototype.int64_data = $util.emptyArray; + TensorProto.prototype.raw_data = $util.newBuffer([]); + TensorProto.prototype.external_data = null; + TensorProto.prototype.name = ""; + TensorProto.prototype.device_detail = null; + TensorProto.prototype.segment = null; + + TensorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.TensorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.dims && message.dims.length)) + message.dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dims.push(reader.int64()); + } else + message.dims.push(reader.int64()); + break; + case 2: + message.data_type = reader.int32(); + break; + case 12: + message.storage_type = reader.int32(); + break; + case 3: + if (!(message.float_data && message.float_data.length)) + message.float_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.float_data.push(reader.float()); + } else + message.float_data.push(reader.float()); + break; + case 4: + if (!(message.int32_data && message.int32_data.length)) + message.int32_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int32_data.push(reader.int32()); + } else + message.int32_data.push(reader.int32()); + break; + case 5: + message.byte_data = reader.bytes(); + break; + case 6: + if (!(message.string_data && message.string_data.length)) + message.string_data = []; + message.string_data.push(reader.bytes()); + break; + case 9: + if (!(message.double_data && message.double_data.length)) + message.double_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.double_data.push(reader.double()); + } else + message.double_data.push(reader.double()); + break; + case 10: + if (!(message.int64_data && message.int64_data.length)) + message.int64_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int64_data.push(reader.int64()); + } else + message.int64_data.push(reader.int64()); + break; + case 13: + message.raw_data = reader.bytes(); + break; + case 
14: + message.external_data = $root.caffe2.ExternalDataProto.decode(reader, reader.uint32()); + break; + case 7: + message.name = reader.string(); + break; + case 8: + message.device_detail = $root.caffe2.DeviceOption.decode(reader, reader.uint32()); + break; + case 11: + message.segment = $root.caffe2.TensorProto.Segment.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorProto.decodeText = function decodeText(reader) { + var message = new $root.caffe2.TensorProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dims": + if (!(message.dims && message.dims.length)) + message.dims = []; + if (reader.first()) + while (!reader.last()) { + message.dims.push(reader.int64()); + reader.next(); + } + else + message.dims.push(reader.int64()); + break; + case "data_type": + message.data_type = reader.enum($root.caffe2.TensorProto.DataType); + break; + case "storage_type": + message.storage_type = reader.enum($root.caffe2.TensorProto.StorageType); + break; + case "float_data": + if (!(message.float_data && message.float_data.length)) + message.float_data = []; + if (reader.first()) + while (!reader.last()) { + message.float_data.push(reader.float()); + reader.next(); + } + else + message.float_data.push(reader.float()); + break; + case "int32_data": + if (!(message.int32_data && message.int32_data.length)) + message.int32_data = []; + if (reader.first()) + while (!reader.last()) { + message.int32_data.push(reader.int32()); + reader.next(); + } + else + message.int32_data.push(reader.int32()); + break; + case "byte_data": + message.byte_data = reader.bytes(); + break; + case "string_data": + if (!(message.string_data && message.string_data.length)) + message.string_data = []; + if (reader.first()) + while (!reader.last()) { + message.string_data.push(reader.bytes()); + reader.next(); + } + else + message.string_data.push(reader.bytes()); + break; + 
case "double_data": + if (!(message.double_data && message.double_data.length)) + message.double_data = []; + if (reader.first()) + while (!reader.last()) { + message.double_data.push(reader.double()); + reader.next(); + } + else + message.double_data.push(reader.double()); + break; + case "int64_data": + if (!(message.int64_data && message.int64_data.length)) + message.int64_data = []; + if (reader.first()) + while (!reader.last()) { + message.int64_data.push(reader.int64()); + reader.next(); + } + else + message.int64_data.push(reader.int64()); + break; + case "raw_data": + message.raw_data = reader.bytes(); + break; + case "external_data": + message.external_data = $root.caffe2.ExternalDataProto.decodeText(reader, true); + break; + case "name": + message.name = reader.string(); + break; + case "device_detail": + message.device_detail = $root.caffe2.DeviceOption.decodeText(reader, true); + break; + case "segment": + message.segment = $root.caffe2.TensorProto.Segment.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TensorProto.DataType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNDEFINED"] = 0; + values[valuesById[1] = "FLOAT"] = 1; + values[valuesById[2] = "INT32"] = 2; + values[valuesById[3] = "BYTE"] = 3; + values[valuesById[4] = "STRING"] = 4; + values[valuesById[5] = "BOOL"] = 5; + values[valuesById[6] = "UINT8"] = 6; + values[valuesById[7] = "INT8"] = 7; + values[valuesById[8] = "UINT16"] = 8; + values[valuesById[9] = "INT16"] = 9; + values[valuesById[10] = "INT64"] = 10; + values[valuesById[12] = "FLOAT16"] = 12; + values[valuesById[13] = "DOUBLE"] = 13; + values[valuesById[14] = "ZERO_COLLISION_HASH"] = 14; + return values; + })(); + + TensorProto.StorageType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[1] = "TYPED"] = 1; + values[valuesById[2] = "RAW"] = 2; + values[valuesById[3] 
= "EXTERNAL"] = 3; + values[valuesById[4] = "NO_CONTENT"] = 4; + return values; + })(); + + TensorProto.Segment = (function() { + + function Segment(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Segment.prototype.begin = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Segment.prototype.end = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + Segment.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.TensorProto.Segment(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.begin = reader.int64(); + break; + case 2: + message.end = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("begin")) + throw $util.ProtocolError("missing required 'begin'", { instance: message }); + if (!message.hasOwnProperty("end")) + throw $util.ProtocolError("missing required 'end'", { instance: message }); + return message; + }; + + Segment.decodeText = function decodeText(reader) { + var message = new $root.caffe2.TensorProto.Segment(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "begin": + message.begin = reader.int64(); + break; + case "end": + message.end = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!message.hasOwnProperty("begin")) + throw $util.ProtocolError("missing required 'begin'", { instance: message }); + if (!message.hasOwnProperty("end")) + throw $util.ProtocolError("missing required 'end'", { instance: message }); + return message; + }; + + return Segment; + })(); + + return TensorProto; + })(); + + caffe2.QTensorProto = (function() { + + function QTensorProto(properties) { 
+ this.dims = []; + this.data = []; + this.scales = []; + this.biases = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + QTensorProto.prototype.dims = $util.emptyArray; + QTensorProto.prototype.precision = 0; + QTensorProto.prototype.scale = 0; + QTensorProto.prototype.bias = 0; + QTensorProto.prototype.is_signed = false; + QTensorProto.prototype.data = $util.emptyArray; + QTensorProto.prototype.name = ""; + QTensorProto.prototype.data_type = 2; + QTensorProto.prototype.scales = $util.emptyArray; + QTensorProto.prototype.biases = $util.emptyArray; + QTensorProto.prototype.axis = 0; + QTensorProto.prototype.is_multiparam = false; + + QTensorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.QTensorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.dims && message.dims.length)) + message.dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dims.push(reader.int64()); + } else + message.dims.push(reader.int64()); + break; + case 2: + message.precision = reader.int32(); + break; + case 3: + message.scale = reader.double(); + break; + case 4: + message.bias = reader.double(); + break; + case 5: + message.is_signed = reader.bool(); + break; + case 6: + if (!(message.data && message.data.length)) + message.data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.data.push(reader.int32()); + } else + message.data.push(reader.int32()); + break; + case 7: + message.name = reader.string(); + break; + case 8: + message.data_type = reader.int32(); + break; + case 9: + if (!(message.scales && 
message.scales.length)) + message.scales = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.scales.push(reader.double()); + } else + message.scales.push(reader.double()); + break; + case 10: + if (!(message.biases && message.biases.length)) + message.biases = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.biases.push(reader.double()); + } else + message.biases.push(reader.double()); + break; + case 11: + message.axis = reader.int32(); + break; + case 12: + message.is_multiparam = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("precision")) + throw $util.ProtocolError("missing required 'precision'", { instance: message }); + if (!message.hasOwnProperty("scale")) + throw $util.ProtocolError("missing required 'scale'", { instance: message }); + if (!message.hasOwnProperty("bias")) + throw $util.ProtocolError("missing required 'bias'", { instance: message }); + if (!message.hasOwnProperty("is_signed")) + throw $util.ProtocolError("missing required 'is_signed'", { instance: message }); + return message; + }; + + QTensorProto.decodeText = function decodeText(reader) { + var message = new $root.caffe2.QTensorProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dims": + if (!(message.dims && message.dims.length)) + message.dims = []; + if (reader.first()) + while (!reader.last()) { + message.dims.push(reader.int64()); + reader.next(); + } + else + message.dims.push(reader.int64()); + break; + case "precision": + message.precision = reader.int32(); + break; + case "scale": + message.scale = reader.double(); + break; + case "bias": + message.bias = reader.double(); + break; + case "is_signed": + message.is_signed = reader.bool(); + break; + case "data": + if (!(message.data && message.data.length)) + message.data = []; + if (reader.first()) 
+ while (!reader.last()) { + message.data.push(reader.int32()); + reader.next(); + } + else + message.data.push(reader.int32()); + break; + case "name": + message.name = reader.string(); + break; + case "data_type": + message.data_type = reader.enum($root.caffe2.TensorProto.DataType); + break; + case "scales": + if (!(message.scales && message.scales.length)) + message.scales = []; + if (reader.first()) + while (!reader.last()) { + message.scales.push(reader.double()); + reader.next(); + } + else + message.scales.push(reader.double()); + break; + case "biases": + if (!(message.biases && message.biases.length)) + message.biases = []; + if (reader.first()) + while (!reader.last()) { + message.biases.push(reader.double()); + reader.next(); + } + else + message.biases.push(reader.double()); + break; + case "axis": + message.axis = reader.int32(); + break; + case "is_multiparam": + message.is_multiparam = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!message.hasOwnProperty("precision")) + throw $util.ProtocolError("missing required 'precision'", { instance: message }); + if (!message.hasOwnProperty("scale")) + throw $util.ProtocolError("missing required 'scale'", { instance: message }); + if (!message.hasOwnProperty("bias")) + throw $util.ProtocolError("missing required 'bias'", { instance: message }); + if (!message.hasOwnProperty("is_signed")) + throw $util.ProtocolError("missing required 'is_signed'", { instance: message }); + return message; + }; + + return QTensorProto; + })(); + + caffe2.TensorProtos = (function() { + + function TensorProtos(properties) { + this.protos = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorProtos.prototype.protos = $util.emptyArray; + + TensorProtos.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); 
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.TensorProtos(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.protos && message.protos.length)) + message.protos = []; + message.protos.push($root.caffe2.TensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorProtos.decodeText = function decodeText(reader) { + var message = new $root.caffe2.TensorProtos(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "protos": + if (!(message.protos && message.protos.length)) + message.protos = []; + message.protos.push($root.caffe2.TensorProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TensorProtos; + })(); + + caffe2.TensorShape = (function() { + + function TensorShape(properties) { + this.dims = []; + this.unknown_dims = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorShape.prototype.dims = $util.emptyArray; + TensorShape.prototype.data_type = 1; + TensorShape.prototype.unknown_dims = $util.emptyArray; + TensorShape.prototype.unknown_shape = false; + TensorShape.prototype.name = ""; + + TensorShape.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.TensorShape(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.dims && message.dims.length)) + message.dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dims.push(reader.int64()); + } else + message.dims.push(reader.int64()); + break; + case 2: + message.data_type = reader.int32(); + break; + case 3: + if (!(message.unknown_dims && message.unknown_dims.length)) + message.unknown_dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.unknown_dims.push(reader.int32()); + } else + message.unknown_dims.push(reader.int32()); + break; + case 4: + message.unknown_shape = reader.bool(); + break; + case 5: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorShape.decodeText = function decodeText(reader) { + var message = new $root.caffe2.TensorShape(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dims": + if (!(message.dims && message.dims.length)) + message.dims = []; + if (reader.first()) + while (!reader.last()) { + message.dims.push(reader.int64()); + reader.next(); + } + else + message.dims.push(reader.int64()); + break; + case "data_type": + message.data_type = reader.enum($root.caffe2.TensorProto.DataType); + break; + case "unknown_dims": + if (!(message.unknown_dims && message.unknown_dims.length)) + message.unknown_dims = []; + if (reader.first()) + while (!reader.last()) { + message.unknown_dims.push(reader.int32()); + reader.next(); + } + else + message.unknown_dims.push(reader.int32()); + break; + case "unknown_shape": + message.unknown_shape = reader.bool(); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return 
message; + }; + + return TensorShape; + })(); + + caffe2.TensorShapes = (function() { + + function TensorShapes(properties) { + this.shapes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorShapes.prototype.shapes = $util.emptyArray; + + TensorShapes.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.TensorShapes(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shapes && message.shapes.length)) + message.shapes = []; + message.shapes.push($root.caffe2.TensorShape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorShapes.decodeText = function decodeText(reader) { + var message = new $root.caffe2.TensorShapes(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "shapes": + if (!(message.shapes && message.shapes.length)) + message.shapes = []; + message.shapes.push($root.caffe2.TensorShape.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TensorShapes; + })(); + + caffe2.TensorBoundShape = (function() { + + function TensorBoundShape(properties) { + this.dim_type = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorBoundShape.prototype.shape = null; + TensorBoundShape.prototype.dim_type = $util.emptyArray; + TensorBoundShape.prototype.name = ""; + + TensorBoundShape.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = 
length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.TensorBoundShape(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shape = $root.caffe2.TensorShape.decode(reader, reader.uint32()); + break; + case 2: + if (!(message.dim_type && message.dim_type.length)) + message.dim_type = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dim_type.push(reader.int32()); + } else + message.dim_type.push(reader.int32()); + break; + case 3: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorBoundShape.decodeText = function decodeText(reader) { + var message = new $root.caffe2.TensorBoundShape(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "shape": + message.shape = $root.caffe2.TensorShape.decodeText(reader, true); + break; + case "dim_type": + if (!(message.dim_type && message.dim_type.length)) + message.dim_type = []; + if (reader.first()) + while (!reader.last()) { + message.dim_type.push(reader.enum($root.caffe2.TensorBoundShape.DimType)); + reader.next(); + } + else + message.dim_type.push(reader.enum($root.caffe2.TensorBoundShape.DimType)); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TensorBoundShape.DimType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNKNOWN"] = 0; + values[valuesById[1] = "CONSTANT"] = 1; + values[valuesById[2] = "BATCH"] = 2; + values[valuesById[3] = "BATCH_OF_FEATURE_MAX"] = 3; + values[valuesById[4] = "BATCH_OF_FEATURE_MAX_DEFAULT"] = 4; + values[valuesById[5] = "FEATURE_MAX"] = 5; + values[valuesById[6] = "FEATURE_MAX_DEFAULT"] = 6; + return values; + })(); + + return TensorBoundShape; + })(); + + 
caffe2.TensorBoundShapes = (function() { + + function TensorBoundShapes(properties) { + this.shapes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorBoundShapes.prototype.shapes = $util.emptyArray; + TensorBoundShapes.prototype.max_batch_size = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + TensorBoundShapes.prototype.max_feature_len = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + TensorBoundShapes.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.TensorBoundShapes(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shapes && message.shapes.length)) + message.shapes = []; + message.shapes.push($root.caffe2.TensorBoundShape.decode(reader, reader.uint32())); + break; + case 2: + message.max_batch_size = reader.int64(); + break; + case 3: + message.max_feature_len = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorBoundShapes.decodeText = function decodeText(reader) { + var message = new $root.caffe2.TensorBoundShapes(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "shapes": + if (!(message.shapes && message.shapes.length)) + message.shapes = []; + message.shapes.push($root.caffe2.TensorBoundShape.decodeText(reader, true)); + break; + case "max_batch_size": + message.max_batch_size = reader.int64(); + break; + case "max_feature_len": + message.max_feature_len = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TensorBoundShapes; + })(); + + caffe2.Argument = (function() { + + function Argument(properties) { + this.floats = []; + 
this.ints = []; + this.strings = []; + this.tensors = []; + this.nets = []; + this.qtensors = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Argument.prototype.name = ""; + Argument.prototype.f = 0; + Argument.prototype.i = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Argument.prototype.s = $util.newBuffer([]); + Argument.prototype.t = null; + Argument.prototype.n = null; + Argument.prototype.floats = $util.emptyArray; + Argument.prototype.ints = $util.emptyArray; + Argument.prototype.strings = $util.emptyArray; + Argument.prototype.tensors = $util.emptyArray; + Argument.prototype.nets = $util.emptyArray; + Argument.prototype.qtensors = $util.emptyArray; + + Argument.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.Argument(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.f = reader.float(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.s = reader.bytes(); + break; + case 10: + message.t = $root.caffe2.TensorProto.decode(reader, reader.uint32()); + break; + case 8: + message.n = $root.caffe2.NetDef.decode(reader, reader.uint32()); + break; + case 5: + if (!(message.floats && message.floats.length)) { + if (message.floats != -1) { + message.floats = []; + message.floatsCount = 0; + } + } + if (message.floatsCount < 1000000) { + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.floats.push(reader.float()); + message.floatsCount++; + } + } + else { + message.floats.push(reader.float()); + message.floatsCount++; + } + } + else { + message.floats = -1; + if ((tag & 7) === 2) { + var endx 
= reader.uint32() + reader.pos; + while (reader.pos < endx) + reader.float(); + } + else { + reader.float(); + } + } + break; + case 6: + if (!(message.ints && message.ints.length)) + message.ints = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.ints.push(reader.int64()); + } else + message.ints.push(reader.int64()); + break; + case 7: + if (!(message.strings && message.strings.length)) + message.strings = []; + message.strings.push(reader.bytes()); + break; + case 11: + if (!(message.tensors && message.tensors.length)) + message.tensors = []; + message.tensors.push($root.caffe2.TensorProto.decode(reader, reader.uint32())); + break; + case 9: + if (!(message.nets && message.nets.length)) + message.nets = []; + message.nets.push($root.caffe2.NetDef.decode(reader, reader.uint32())); + break; + case 12: + if (!(message.qtensors && message.qtensors.length)) + message.qtensors = []; + message.qtensors.push($root.caffe2.QTensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Argument.decodeText = function decodeText(reader) { + var message = new $root.caffe2.Argument(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "f": + message.f = reader.float(); + break; + case "i": + message.i = reader.int64(); + break; + case "s": + message.s = reader.bytes(); + break; + case "t": + message.t = $root.caffe2.TensorProto.decodeText(reader, true); + break; + case "n": + message.n = $root.caffe2.NetDef.decodeText(reader, true); + break; + case "floats": + if (!(message.floats && message.floats.length)) + message.floats = []; + if (reader.first()) + while (!reader.last()) { + message.floats.push(reader.float()); + reader.next(); + } + else + message.floats.push(reader.float()); + break; + case "ints": + if (!(message.ints && 
message.ints.length)) + message.ints = []; + if (reader.first()) + while (!reader.last()) { + message.ints.push(reader.int64()); + reader.next(); + } + else + message.ints.push(reader.int64()); + break; + case "strings": + if (!(message.strings && message.strings.length)) + message.strings = []; + if (reader.first()) + while (!reader.last()) { + message.strings.push(reader.bytes()); + reader.next(); + } + else + message.strings.push(reader.bytes()); + break; + case "tensors": + if (!(message.tensors && message.tensors.length)) + message.tensors = []; + message.tensors.push($root.caffe2.TensorProto.decodeText(reader, true)); + break; + case "nets": + if (!(message.nets && message.nets.length)) + message.nets = []; + message.nets.push($root.caffe2.NetDef.decodeText(reader, true)); + break; + case "qtensors": + if (!(message.qtensors && message.qtensors.length)) + message.qtensors = []; + message.qtensors.push($root.caffe2.QTensorProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Argument; + })(); + + caffe2.DeviceTypeProto = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "PROTO_CPU"] = 0; + values[valuesById[1] = "PROTO_CUDA"] = 1; + values[valuesById[2] = "PROTO_MKLDNN"] = 2; + values[valuesById[3] = "PROTO_OPENGL"] = 3; + values[valuesById[4] = "PROTO_OPENCL"] = 4; + values[valuesById[5] = "PROTO_IDEEP"] = 5; + values[valuesById[6] = "PROTO_HIP"] = 6; + values[valuesById[7] = "PROTO_FPGA"] = 7; + values[valuesById[8] = "PROTO_MSNPU"] = 8; + values[valuesById[9] = "PROTO_XLA"] = 9; + values[valuesById[10] = "PROTO_COMPILE_TIME_MAX_DEVICE_TYPES"] = 10; + values[valuesById[20901] = "PROTO_ONLY_FOR_TEST"] = 20901; + return values; + })(); + + caffe2.DeviceOption = (function() { + + function DeviceOption(properties) { + this.extra_info = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if 
(properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DeviceOption.prototype.device_type = 0; + DeviceOption.prototype.device_id = 0; + DeviceOption.prototype.random_seed = 0; + DeviceOption.prototype.node_name = ""; + DeviceOption.prototype.numa_node_id = 0; + DeviceOption.prototype.extra_info = $util.emptyArray; + + DeviceOption.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.DeviceOption(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.device_type = reader.int32(); + break; + case 2: + message.device_id = reader.int32(); + break; + case 3: + message.random_seed = reader.uint32(); + break; + case 4: + message.node_name = reader.string(); + break; + case 5: + message.numa_node_id = reader.int32(); + break; + case 6: + if (!(message.extra_info && message.extra_info.length)) + message.extra_info = []; + message.extra_info.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DeviceOption.decodeText = function decodeText(reader) { + var message = new $root.caffe2.DeviceOption(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "device_type": + message.device_type = reader.int32(); + break; + case "device_id": + message.device_id = reader.int32(); + break; + case "random_seed": + message.random_seed = reader.uint32(); + break; + case "node_name": + message.node_name = reader.string(); + break; + case "numa_node_id": + message.numa_node_id = reader.int32(); + break; + case "extra_info": + if (!(message.extra_info && message.extra_info.length)) + message.extra_info = []; + if (reader.first()) + while (!reader.last()) { + message.extra_info.push(reader.string()); + reader.next(); + } + else + 
message.extra_info.push(reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return DeviceOption; + })(); + + caffe2.OperatorDef = (function() { + + function OperatorDef(properties) { + this.input = []; + this.output = []; + this.arg = []; + this.control_input = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OperatorDef.prototype.input = $util.emptyArray; + OperatorDef.prototype.output = $util.emptyArray; + OperatorDef.prototype.name = ""; + OperatorDef.prototype.type = ""; + OperatorDef.prototype.arg = $util.emptyArray; + OperatorDef.prototype.device_option = null; + OperatorDef.prototype.engine = ""; + OperatorDef.prototype.control_input = $util.emptyArray; + OperatorDef.prototype.is_gradient_op = false; + OperatorDef.prototype.debug_info = ""; + OperatorDef.prototype.domain = ""; + OperatorDef.prototype.op_version = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + OperatorDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.OperatorDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + case 2: + if (!(message.output && message.output.length)) + message.output = []; + message.output.push(reader.string()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.type = reader.string(); + break; + case 5: + if (!(message.arg && message.arg.length)) + message.arg = []; + message.arg.push($root.caffe2.Argument.decode(reader, reader.uint32())); + break; + case 6: + message.device_option = $root.caffe2.DeviceOption.decode(reader, reader.uint32()); + break; + case 7: + message.engine = reader.string(); + break; + case 8: + if (!(message.control_input && message.control_input.length)) + message.control_input = []; + message.control_input.push(reader.string()); + break; + case 9: + message.is_gradient_op = reader.bool(); + break; + case 10: + message.debug_info = reader.string(); + break; + case 11: + message.domain = reader.string(); + break; + case 12: + message.op_version = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OperatorDef.decodeText = function decodeText(reader) { + var message = new $root.caffe2.OperatorDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "input": + if (!(message.input && message.input.length)) + message.input = []; + if (reader.first()) + while (!reader.last()) { + message.input.push(reader.string()); + reader.next(); + } + else + message.input.push(reader.string()); + break; + case "output": + if (!(message.output && message.output.length)) + message.output = []; + if (reader.first()) + while (!reader.last()) { + message.output.push(reader.string()); + reader.next(); + } + else + 
message.output.push(reader.string()); + break; + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "arg": + if (!(message.arg && message.arg.length)) + message.arg = []; + message.arg.push($root.caffe2.Argument.decodeText(reader, true)); + break; + case "device_option": + message.device_option = $root.caffe2.DeviceOption.decodeText(reader, true); + break; + case "engine": + message.engine = reader.string(); + break; + case "control_input": + if (!(message.control_input && message.control_input.length)) + message.control_input = []; + if (reader.first()) + while (!reader.last()) { + message.control_input.push(reader.string()); + reader.next(); + } + else + message.control_input.push(reader.string()); + break; + case "is_gradient_op": + message.is_gradient_op = reader.bool(); + break; + case "debug_info": + message.debug_info = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + case "op_version": + message.op_version = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return OperatorDef; + })(); + + caffe2.MapFieldEntry = (function() { + + function MapFieldEntry(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MapFieldEntry.prototype.key = ""; + MapFieldEntry.prototype.val = ""; + + MapFieldEntry.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.MapFieldEntry(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.val = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("key")) + throw $util.ProtocolError("missing required 'key'", { instance: message }); + if (!message.hasOwnProperty("val")) + throw $util.ProtocolError("missing required 'val'", { instance: message }); + return message; + }; + + MapFieldEntry.decodeText = function decodeText(reader) { + var message = new $root.caffe2.MapFieldEntry(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "val": + message.val = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + if (!message.hasOwnProperty("key")) + throw $util.ProtocolError("missing required 'key'", { instance: message }); + if (!message.hasOwnProperty("val")) + throw $util.ProtocolError("missing required 'val'", { instance: message }); + return message; + }; + + return MapFieldEntry; + })(); + + caffe2.BackendOptions = (function() { + + function BackendOptions(properties) { + this.option = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BackendOptions.prototype.backend_name = ""; + BackendOptions.prototype.option = $util.emptyArray; + + BackendOptions.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.BackendOptions(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.backend_name = reader.string(); + break; + case 2: + if (!(message.option && message.option.length)) + message.option = []; + message.option.push($root.caffe2.MapFieldEntry.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("backend_name")) + throw $util.ProtocolError("missing required 'backend_name'", { instance: message }); + return message; + }; + + BackendOptions.decodeText = function decodeText(reader) { + var message = new $root.caffe2.BackendOptions(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "backend_name": + message.backend_name = reader.string(); + break; + case "option": + if (!(message.option && message.option.length)) + message.option = []; + message.option.push($root.caffe2.MapFieldEntry.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + if (!message.hasOwnProperty("backend_name")) + throw $util.ProtocolError("missing required 'backend_name'", { instance: message }); + return message; + }; + + return BackendOptions; + })(); + + caffe2.PartitionInfo = (function() { + + function PartitionInfo(properties) { + this.device_id = []; + this.backend_options = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PartitionInfo.prototype.name = ""; + PartitionInfo.prototype.device_id = $util.emptyArray; + PartitionInfo.prototype.extra_info = ""; + PartitionInfo.prototype.backend_options = $util.emptyArray; + + PartitionInfo.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.PartitionInfo(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.device_id && message.device_id.length)) + message.device_id = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.device_id.push(reader.int32()); + } else + message.device_id.push(reader.int32()); + break; + case 3: + message.extra_info = reader.string(); + break; + case 4: + if (!(message.backend_options && message.backend_options.length)) + message.backend_options = []; + message.backend_options.push($root.caffe2.BackendOptions.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("name")) + throw $util.ProtocolError("missing required 'name'", { instance: message }); + return message; + }; + + PartitionInfo.decodeText = function decodeText(reader) { + var message = new $root.caffe2.PartitionInfo(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "device_id": + if (!(message.device_id && message.device_id.length)) + message.device_id = []; + if (reader.first()) + while (!reader.last()) { + message.device_id.push(reader.int32()); + reader.next(); + } + else + message.device_id.push(reader.int32()); + break; + case "extra_info": + message.extra_info = reader.string(); + break; + case "backend_options": + if (!(message.backend_options && message.backend_options.length)) + message.backend_options = []; + message.backend_options.push($root.caffe2.BackendOptions.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + if (!message.hasOwnProperty("name")) + throw $util.ProtocolError("missing required 'name'", { instance: message }); + return message; + }; + + return 
PartitionInfo; + })(); + + caffe2.NetDef = (function() { + + function NetDef(properties) { + this.op = []; + this.arg = []; + this.external_input = []; + this.external_output = []; + this.partition_info = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NetDef.prototype.name = ""; + NetDef.prototype.op = $util.emptyArray; + NetDef.prototype.type = ""; + NetDef.prototype.num_workers = 0; + NetDef.prototype.device_option = null; + NetDef.prototype.arg = $util.emptyArray; + NetDef.prototype.external_input = $util.emptyArray; + NetDef.prototype.external_output = $util.emptyArray; + NetDef.prototype.partition_info = $util.emptyArray; + + NetDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.NetDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.op && message.op.length)) + message.op = []; + message.op.push($root.caffe2.OperatorDef.decode(reader, reader.uint32())); + break; + case 3: + message.type = reader.string(); + break; + case 4: + message.num_workers = reader.int32(); + break; + case 5: + message.device_option = $root.caffe2.DeviceOption.decode(reader, reader.uint32()); + break; + case 6: + if (!(message.arg && message.arg.length)) + message.arg = []; + message.arg.push($root.caffe2.Argument.decode(reader, reader.uint32())); + break; + case 7: + if (!(message.external_input && message.external_input.length)) + message.external_input = []; + message.external_input.push(reader.string()); + break; + case 8: + if (!(message.external_output && message.external_output.length)) + message.external_output = []; + message.external_output.push(reader.string()); + break; + case 
9: + if (!(message.partition_info && message.partition_info.length)) + message.partition_info = []; + message.partition_info.push($root.caffe2.PartitionInfo.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NetDef.decodeText = function decodeText(reader) { + var message = new $root.caffe2.NetDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "op": + if (!(message.op && message.op.length)) + message.op = []; + message.op.push($root.caffe2.OperatorDef.decodeText(reader, true)); + break; + case "type": + message.type = reader.string(); + break; + case "num_workers": + message.num_workers = reader.int32(); + break; + case "device_option": + message.device_option = $root.caffe2.DeviceOption.decodeText(reader, true); + break; + case "arg": + if (!(message.arg && message.arg.length)) + message.arg = []; + message.arg.push($root.caffe2.Argument.decodeText(reader, true)); + break; + case "external_input": + if (!(message.external_input && message.external_input.length)) + message.external_input = []; + if (reader.first()) + while (!reader.last()) { + message.external_input.push(reader.string()); + reader.next(); + } + else + message.external_input.push(reader.string()); + break; + case "external_output": + if (!(message.external_output && message.external_output.length)) + message.external_output = []; + if (reader.first()) + while (!reader.last()) { + message.external_output.push(reader.string()); + reader.next(); + } + else + message.external_output.push(reader.string()); + break; + case "partition_info": + if (!(message.partition_info && message.partition_info.length)) + message.partition_info = []; + message.partition_info.push($root.caffe2.PartitionInfo.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NetDef; + 
})(); + + caffe2.ExecutionStep = (function() { + + function ExecutionStep(properties) { + this.substep = []; + this.network = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ExecutionStep.prototype.name = ""; + ExecutionStep.prototype.substep = $util.emptyArray; + ExecutionStep.prototype.network = $util.emptyArray; + ExecutionStep.prototype.num_iter = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ExecutionStep.prototype.criteria_network = ""; + ExecutionStep.prototype.report_net = ""; + ExecutionStep.prototype.report_interval = 0; + ExecutionStep.prototype.run_every_ms = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ExecutionStep.prototype.concurrent_substeps = false; + ExecutionStep.prototype.should_stop_blob = ""; + ExecutionStep.prototype.only_once = false; + ExecutionStep.prototype.create_workspace = false; + ExecutionStep.prototype.num_concurrent_instances = 0; + + ExecutionStep.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.ExecutionStep(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.substep && message.substep.length)) + message.substep = []; + message.substep.push($root.caffe2.ExecutionStep.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.network && message.network.length)) + message.network = []; + message.network.push(reader.string()); + break; + case 4: + message.num_iter = reader.int64(); + break; + case 5: + message.criteria_network = reader.string(); + break; + case 7: + message.report_net = reader.string(); + break; + case 8: + message.report_interval = reader.int32(); + break; + case 11: + message.run_every_ms = reader.int64(); + break; + case 6: + message.concurrent_substeps = reader.bool(); + break; + case 9: + message.should_stop_blob = reader.string(); + break; + case 10: + message.only_once = reader.bool(); + break; + case 12: + message.create_workspace = reader.bool(); + break; + case 13: + message.num_concurrent_instances = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ExecutionStep.decodeText = function decodeText(reader) { + var message = new $root.caffe2.ExecutionStep(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "substep": + if (!(message.substep && message.substep.length)) + message.substep = []; + message.substep.push($root.caffe2.ExecutionStep.decodeText(reader, true)); + break; + case "network": + if (!(message.network && message.network.length)) + message.network = []; + if (reader.first()) + while (!reader.last()) { + message.network.push(reader.string()); + reader.next(); + } + else + message.network.push(reader.string()); + break; + case "num_iter": + message.num_iter = reader.int64(); + break; + 
case "criteria_network": + message.criteria_network = reader.string(); + break; + case "report_net": + message.report_net = reader.string(); + break; + case "report_interval": + message.report_interval = reader.int32(); + break; + case "run_every_ms": + message.run_every_ms = reader.int64(); + break; + case "concurrent_substeps": + message.concurrent_substeps = reader.bool(); + break; + case "should_stop_blob": + message.should_stop_blob = reader.string(); + break; + case "only_once": + message.only_once = reader.bool(); + break; + case "create_workspace": + message.create_workspace = reader.bool(); + break; + case "num_concurrent_instances": + message.num_concurrent_instances = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ExecutionStep; + })(); + + caffe2.PlanDef = (function() { + + function PlanDef(properties) { + this.network = []; + this.execution_step = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PlanDef.prototype.name = ""; + PlanDef.prototype.network = $util.emptyArray; + PlanDef.prototype.execution_step = $util.emptyArray; + + PlanDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.caffe2.PlanDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.network && message.network.length)) + message.network = []; + message.network.push($root.caffe2.NetDef.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.execution_step && message.execution_step.length)) + message.execution_step = []; + message.execution_step.push($root.caffe2.ExecutionStep.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PlanDef.decodeText = function decodeText(reader) { + var message = new $root.caffe2.PlanDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "network": + if (!(message.network && message.network.length)) + message.network = []; + message.network.push($root.caffe2.NetDef.decodeText(reader, true)); + break; + case "execution_step": + if (!(message.execution_step && message.execution_step.length)) + message.execution_step = []; + message.execution_step.push($root.caffe2.ExecutionStep.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return PlanDef; + })(); + + caffe2.BlobProto = (function() { + + function BlobProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BlobProto.prototype.name = ""; + BlobProto.prototype.type = ""; + BlobProto.prototype.tensor = null; + BlobProto.prototype.content = $util.newBuffer([]); + BlobProto.prototype.qtensor = null; + BlobProto.prototype.content_num_chunks = 0; + BlobProto.prototype.content_chunk_id = 0; + + BlobProto.decode = function decode(reader, length) { + if 
(!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.BlobProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.tensor = $root.caffe2.TensorProto.decode(reader, reader.uint32()); + break; + case 4: + message.content = reader.bytes(); + break; + case 5: + message.qtensor = $root.caffe2.QTensorProto.decode(reader, reader.uint32()); + break; + case 6: + message.content_num_chunks = reader.int32(); + break; + case 7: + message.content_chunk_id = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BlobProto.decodeText = function decodeText(reader) { + var message = new $root.caffe2.BlobProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "tensor": + message.tensor = $root.caffe2.TensorProto.decodeText(reader, true); + break; + case "content": + message.content = reader.bytes(); + break; + case "qtensor": + message.qtensor = $root.caffe2.QTensorProto.decodeText(reader, true); + break; + case "content_num_chunks": + message.content_num_chunks = reader.int32(); + break; + case "content_chunk_id": + message.content_chunk_id = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BlobProto; + })(); + + caffe2.DBReaderProto = (function() { + + function DBReaderProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DBReaderProto.prototype.name = ""; + DBReaderProto.prototype.source = ""; + 
DBReaderProto.prototype.db_type = ""; + DBReaderProto.prototype.key = ""; + + DBReaderProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.caffe2.DBReaderProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.source = reader.string(); + break; + case 3: + message.db_type = reader.string(); + break; + case 4: + message.key = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DBReaderProto.decodeText = function decodeText(reader) { + var message = new $root.caffe2.DBReaderProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "source": + message.source = reader.string(); + break; + case "db_type": + message.db_type = reader.string(); + break; + case "key": + message.key = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return DBReaderProto; + })(); + + return caffe2; + })(); + + return $root; +})(protobuf); diff --git a/frontend/packages/core/public/netron/caffe2.js b/frontend/packages/core/public/netron/caffe2.js new file mode 100644 index 00000000..ce217d5d --- /dev/null +++ b/frontend/packages/core/public/netron/caffe2.js @@ -0,0 +1,849 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var caffe2 = caffe2 || {}; +var protobuf = protobuf || require('protobufjs'); +var prototxt = prototxt || require('protobufjs/ext/prototxt'); + +caffe2.ModelFactory = class { + + match(context) { + const identifier = context.identifier.toLowerCase(); + const extension = identifier.split('.').pop().toLowerCase(); + if (extension == 'pb') { + if 
(identifier.endsWith('predict_net.pb') || identifier.endsWith('init_net.pb') || + identifier.startsWith('predict_net') || identifier.startsWith('init_net')) { + return true; + } + const tags = context.tags('pb'); + // ignore input_0.pb, output_0.pb + if (tags.size > 0 && + tags.has(1) && tags.get(1) == 0 && + tags.has(2) && tags.get(2) == 0 && + tags.has(9) && tags.get(9) == 2) { + return false; + } + if (tags.size > 0 && + Array.from(tags.values()).some((v) => v === 5)) { + return false; + } + if (tags.size > 0 && + (!tags.has(1) || tags.get(1) === 2) && + (!tags.has(2) || tags.get(2) === 2) && + (!tags.has(7) || tags.get(7) === 2) && + (!tags.has(8) || tags.get(8) === 2)) { + let buffer = context.buffer; + if (buffer.length > 3 && buffer[0] == 0x0A) { + let size = buffer[1]; + if (size < 64 && buffer.length > 2 + size + 1 && buffer.slice(2, 2 + size).every((c) => c >= 32 && c <= 127) && buffer[2 + size] == 0x12) { + return true; + } + } + if (buffer.length > 3 && buffer[0] == 0x12) { + return true; + } + } + } + if (extension == 'pbtxt' || extension == 'prototxt') { + if (identifier.endsWith('predict_net')) { + return true; + } + const tags = context.tags('pbtxt'); + if (tags.has('op')) { + if (context.identifier === 'ops.pbtxt' && context.text.indexOf(' attr {') !== -1) { + return false; + } + return true; + } + } + return false; + } + + open(context, host) { + return host.require('./caffe2-proto').then(() => { + return caffe2.Metadata.open(host).then((metadata) => { + const identifier = context.identifier; + const parts = identifier.split('.'); + const extension = parts.pop().toLowerCase(); + const base = parts.join('.'); + if (extension == 'pbtxt' || extension == 'prototxt') { + const open_text = (predict, init) => { + let predict_net = null; + let init_net = null; + try { + caffe2.proto = protobuf.roots.caffe2.caffe2; + const reader = prototxt.TextReader.create(predict); + reader.field = function(tag, message) { + if (message instanceof 
caffe2.proto.DeviceOption) { + message[tag] = this.skip(); + return; + } + throw new Error("Unknown field '" + tag + "'" + this.location()); + }; + predict_net = caffe2.proto.NetDef.decodeText(reader); + } + catch (error) { + throw new caffe2.Error("File text format is not caffe2.NetDef (" + error.message + ") in '" + identifier + "'."); + } + try { + caffe2.proto = protobuf.roots.caffe2.caffe2; + init_net = (typeof init === 'string') ? + caffe2.proto.NetDef.decodeText(prototxt.TextReader.create(init)) : + caffe2.proto.NetDef.decode(init); + } + catch (error) { + // continue regardless of error + } + try { + return new caffe2.Model(metadata, predict_net, init_net); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new caffe2.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }; + if (base.toLowerCase().endsWith('init_net') || base.toLowerCase().startsWith('init_net')) { + return context.request(identifier.replace('init_net', 'predict_net'), 'utf-8').then((text) => { + return open_text(text, context.text); + }).catch(() => { + return open_text(context.text, null); + }); + } + else if (base.toLowerCase().endsWith('predict_net') || base.toLowerCase().startsWith('predict_net')) { + return context.request(identifier.replace('predict_net', 'init_net').replace(/\.pbtxt/, '.pb'), null).then((buffer) => { + return open_text(context.text, buffer); + }).catch(() => { + return context.request(identifier.replace('predict_net', 'init_net'), 'utf-8').then((text) => { + return open_text(context.text, text); + }).catch(() => { + return open_text(context.text, null); + }); + }); + } + else { + return context.request(base + '_init.pb', null).then((buffer) => { + return open_text(context.text, buffer); + }).catch(() => { + return open_text(context.text, null); + }); + } + } + else { + const open_binary = (predict, init) => { + let predict_net = null; + let init_net = null; + 
try { + caffe2.proto = protobuf.roots.caffe2.caffe2; + predict_net = caffe2.proto.NetDef.decode(predict); + } + catch (error) { + throw new caffe2.Error("File format is not caffe2.NetDef (" + error.message + ") in '" + identifier + "'."); + } + try { + if (init) { + caffe2.proto = protobuf.roots.caffe2.caffe2; + init_net = caffe2.proto.NetDef.decode(init); + } + } + catch (error) { + // continue regardless of error + } + try { + return new caffe2.Model(metadata, predict_net, init_net); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new caffe2.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }; + if (base.toLowerCase().endsWith('init_net')) { + return context.request(base.replace(/init_net$/, '') + 'predict_net.' + extension, null).then((buffer) => { + return open_binary(buffer, context.buffer); + }).catch(() => { + return open_binary(context.buffer, null); + }); + } + else if (base.toLowerCase().endsWith('_init')) { + return context.request(base.replace(/_init$/, '') + '.' + extension, null).then((buffer) => { + return open_binary(buffer, context.buffer); + }).catch(() => { + return open_binary(context.buffer, null); + }); + } + else if (base.toLowerCase().endsWith('predict_net') || base.toLowerCase().startsWith('predict_net')) { + return context.request(identifier.replace('predict_net', 'init_net'), null).then((buffer) => { + return open_binary(context.buffer, buffer); + }).catch(() => { + return open_binary(context.buffer, null); + }); + } + else { + return context.request(base + '_init.' 
caffe2.Model = class {

    // A Caffe2 model pairs a predict net with an optional init net; the
    // viewer renders it as a single graph.
    constructor(metadata, predict_net, init_net) {
        // Older NetDef protos may carry a domain; fall back to null when absent.
        this._domain = predict_net.domain || null;
        this._graphs = [ new caffe2.Graph(metadata, predict_net, init_net) ];
    }

    // Human-readable format tag shown in the model properties panel.
    get format() {
        return 'Caffe2';
    }

    get domain() {
        return this._domain;
    }

    get graphs() {
        return this._graphs;
    }
};
+ op.type + "'."); + } + if (initializer.values && (initializer.values.floats.length !== 1 || initializer.values.floats[0] !== 0)) { + initializer.input = false; + } + } + } + } + + let scope = {}; + let index = 0; + for (let op of netDef.op) { + op.input = op.input.map((input) => scope[input] ? scope[input] : input); + op.output = op.output.map((output) => { + if (scope[output]) { + const next = output + '\n' + index.toString(); // custom argument id + scope[output] = next; + return next; + } + scope[output] = output; + return output; + }); + index++; + } + + let lastNode = null; + let lastOutput = null; + for (let op of netDef.op) { + let node = new caffe2.Node(metadata, op, inputs); + if (op.input.length == 1 && + op.output.length >= 1 && + op.input[0].split('\n').shift() == op.output[0].split('\n').shift() && + lastNode && + lastOutput == op.input[0].split('\n').shift()) { + lastNode.chain.push(node); + } + else { + this._nodes.push(node); + lastNode = null; + lastOutput = null; + if (op.output.length == 1) { + lastNode = node; + lastOutput = op.output[0].split('\n').shift(); + } + } + } + + this._inputs = []; + for (let input of netDef.external_input) { + if (netDef.external_input.length > 1) { + const initializer = inputs.get(input); + if (initializer && initializer.input === false) { + continue; + } + } + this._inputs.push(new caffe2.Parameter(input, [ new caffe2.Argument(input, null, null) ])); + } + + this._outputs = []; + for (let output of netDef.external_output) { + this._outputs.push(new caffe2.Parameter(output, [ new caffe2.Argument(output, null, null) ])); + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + toString() { + return 'graph(' + this.name + ')'; + } +}; + +caffe2.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + 
caffe2.Argument = class {

    // A named tensor edge in the graph; `initializer` (a caffe2.Tensor) is
    // present when the value comes from the init net rather than runtime.
    constructor(name, type, initializer) {
        if (typeof name !== 'string') {
            throw new caffe2.Error("Invalid argument identifier '" + JSON.stringify(name) + "'.");
        }
        this._name = name;
        this._type = type || null;
        this._initializer = initializer || null;
    }

    get name() {
        return this._name;
    }

    // An initializer's own type wins over the statically supplied one.
    get type() {
        return this._initializer ? this._initializer.type : this._type;
    }

    // Quantization info only exists on initialized (weight) arguments.
    get quantization() {
        return this._initializer ? this._initializer.quantization : null;
    }

    get initializer() {
        return this._initializer;
    }
};
(inputs.length - inputIndex) : 1; + let inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).filter((id) => id != '' || inputDef.option != 'optional').map((id) => { + return new caffe2.Argument(id, null, tensors[id]); + }); + this._inputs.push(new caffe2.Parameter(inputDef.name, inputArguments)); + inputIndex += inputCount; + } + } + } + else { + this._inputs = this._inputs.concat(inputs.slice(inputIndex).map((input, index) => { + let inputName = ((inputIndex + index) == 0) ? 'input' : (inputIndex + index).toString(); + return new caffe2.Parameter(inputName, [ + new caffe2.Argument(input, null, tensors[input]) + ]); + })); + } + + this._outputs = []; + let outputIndex = 0; + if (schema && schema.outputs) { + for (let outputDef of schema.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + let outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + let outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => new caffe2.Argument(id)); + this._outputs.push(new caffe2.Parameter(outputDef.name, outputArguments)); + outputIndex += outputCount; + } + } + } + else { + this._outputs = this._outputs.concat(outputs.slice(outputIndex).map((output, index) => { + let outputName = ((outputIndex + index) == 0) ? 
'output' : (outputIndex + index).toString(); + return new caffe2.Parameter(outputName, [ + new caffe2.Argument(output, null, null) + ]); + })); + } + } + + get name() { + return this._name || ''; + } + + get device() { + return this._device || ''; + } + + get type() { + return this._type; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get chain() { + return this._chain; + } +}; + +caffe2.Attribute = class { + + constructor(metadata, schema, arg) { + this._name = arg.name; + if (arg.floats && arg.floats.length > 0) { + this._value = arg.floats; + } + else if (arg.ints && arg.ints.length > 0) { + this._value = arg.ints; + } + else if (arg.nets && arg.nets.length > 0) { + this._value = arg.nets.map((net) => new caffe2.Graph(metadata, net, null)); + this._type = 'graph[]'; + } + else if (arg.n) { + this._value = new caffe2.Graph(metadata, arg.n, null); + this._type = 'graph'; + } + else if (arg.i != 0) { + this._value = arg.i; + } + else { + this._value = arg.i; + } + if (schema) { + if (Object.prototype.hasOwnProperty.call(schema, 'type')) { + this._type = schema.type; + if (this._type == 'boolean') { + switch (this._value) { + case 1: this._value = true; break; + case 0: this._value = false; break; + } + } + } + } + + if (schema) { + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (this._value == schema.default || (this._value && this._value.toString() == schema.default.toString())) { + this._visible = false; + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type || null; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +caffe2.Tensor = class { + + constructor(name, tensor) { + this._name = name; + const shape = tensor.shape && tensor.shape.ints ? tensor.shape.ints : null; + this._type = new caffe2.TensorType(tensor.dataType, new caffe2.TensorShape(shape)); + this._values = tensor.values || null; + this._scale = tensor.Y_scale ? tensor.Y_scale.f : 0; + this._zeroPoint = tensor.Y_zero_point ? tensor.Y_zero_point.i : 0; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get kind() { + return 'Initializer'; + } + + get quantization() { + if (this._scale != 0 || this._zeroPoint != 0) { + return this._scale.toString() + ' * ' + (this._zeroPoint == 0 ? 'q' : ('(q - ' + this._zeroPoint.toString() + ')')); + } + return null; + } + + get state() { + return this._context().state; + } + + get value() { + let context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + let context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + let value = this._decode(context, 0); + return caffe2.Tensor._stringify(value, '', ' '); + } + + _context() { + let context = {}; + context.state = null; + context.index = 0; + context.count = 0; + if (!this._values) { + context.state = 'Tensor data is empty.'; + return context; + } + if (this._values.floats == -1) { + context.state = 'Tensor data is too large to load in Chrome.'; + return context; + } + switch (this._type.dataType) { + case 'float32': + context.data = this._values.floats; + break; + case 'boolean': + context.data = this._values.ints; + break; + case 'int8': + context.data = new Int8Array(this._values.s); + break; + case 'int32': + context.data = this._values.ints; + break; + default: + context.state = 'Unknown data type.'; + return context; + } + context.shape = this._type.shape.dimensions; + context.dataType = this._type.dataType; + return 
context; + } + + _decode(context, dimension) { + let results = []; + let size = context.shape[dimension]; + if (dimension == context.shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (context.dataType) { + case 'float32': + results.push(context.data[context.index]); + break; + case 'boolean': + results.push(context.data[context.index] == 0 ? false : true); + break; + case 'int8': + results.push(context.data[context.index]); + break; + case 'int32': + results.push(context.data[context.index]); + break; + default: + context.state = 'Unknown data type.'; + break; + } + context.index++; + context.count++; + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + return results; + } + + static _stringify(value, indentation, indent) { + if (Array.isArray(value)) { + let result = []; + result.push(indentation + '['); + const items = value.map((item) => caffe2.Tensor._stringify(item, indentation + indent, indent)); + if (items.length > 0) { + result.push(items.join(',\n')); + } + result.push(indentation + ']'); + return result.join('\n'); + } + if (typeof value == 'string') { + return indentation + value; + } + if (value == Infinity) { + return indentation + 'Infinity'; + } + if (value == -Infinity) { + return indentation + '-Infinity'; + } + if (isNaN(value)) { + return indentation + 'NaN'; + } + return indentation + value.toString(); + } +}; + +caffe2.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType || '?'; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +caffe2.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + 
get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? ('[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']') : ''; + } +}; + +caffe2.Metadata = class { + + static open(host) { + if (caffe2.Metadata._metadata) { + return Promise.resolve(caffe2.Metadata._metadata); + } + return host.request(null, 'caffe2-metadata.json', 'utf-8').then((data) => { + caffe2.Metadata._metadata = new caffe2.Metadata(data); + return caffe2.Metadata._metadata; + }).catch(() => { + caffe2.Metadata._metadata = new caffe2.Metadata(null); + return caffe2.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + let items = JSON.parse(data); + if (items) { + for (let item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (let attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +caffe2.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Caffe2 model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = caffe2.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/chainer.js b/frontend/packages/core/public/netron/chainer.js new file mode 100644 index 00000000..a6be39e4 --- /dev/null +++ b/frontend/packages/core/public/netron/chainer.js @@ -0,0 +1,673 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var chainer = 
chainer || {}; +var long = long || { Long: require('long') }; + +chainer.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'npz') { + const entries = context.entries('zip'); + return entries.length > 0 && entries.every((entry) => entry.name.indexOf('/') !== -1); + } + if (extension === 'h5' || extension === 'hd5' || extension === 'hdf5' || extension === 'keras' || extension === 'model') { + const buffer = context.buffer; + const find = (signature, start, end) => { + for (let i = start; i <= end; i++) { + if (buffer.length > i + signature.length && Array.from(signature).every((value, index) => value.charCodeAt(0) === buffer[i + index])) { + return true; + } + } + return false; + }; + if (find('\x89HDF\r\n\x1A\n', 0, 0) && !find('model_config\0', 0, 2048) && !find('keras_version\0', 0, 2048)) { + return true; + } + } + return false; + } + + open(context, host) { + const extension = context.identifier.split('.').pop().toLowerCase(); + switch (extension) { + case 'npz': + return this._openNumPy(context, host); + case 'h5': + case 'hd5': + case 'hdf5': + return this._openHdf5(context, host); + } + } + + _openNumPy(context, host) { + const identifier = context.identifier; + return host.require('./numpy').then((numpy) => { + return host.require('./pickle').then((pickle) => { + try { + const modules = []; + const modulesMap = new Map(); + + const functionTable = new Map(); + const constructorTable = new Map(); + functionTable.set('_codecs.encode', function(obj /*, econding */) { + return obj; + }); + constructorTable.set('numpy.core.multiarray._reconstruct', function(subtype, shape, dtype) { + this.subtype = subtype; + this.shape = shape; + this.dtype = dtype; + this.__setstate__ = function(state) { + this.version = state[0]; + this.shape = state[1]; + this.typecode = state[2]; + this.is_f_order = state[3]; + this.rawdata = state[4]; + }; + this.__read__ = 
function(unpickler) { + const array = {}; + array.__type__ = this.subtype; + array.dtype = this.typecode; + array.shape = this.shape; + let size = array.dtype.itemsize; + for (let i = 0; i < array.shape.length; i++) { + size = size * array.shape[i]; + } + if (typeof this.rawdata == 'string') { + array.data = unpickler.unescape(this.rawdata, size); + if (array.data.length != size) { + throw new chainer.Error('Invalid string array data size.'); + } + } + else { + array.data = this.rawdata; + if (array.data.length != size) { + // TODO + // throw new chainer.Error('Invalid array data size.'); + } + } + return array; + }; + }); + constructorTable.set('numpy.dtype', function(obj, align, copy) { + switch (obj) { + case 'i1': this.name = 'int8'; this.itemsize = 1; break; + case 'i2': this.name = 'int16'; this.itemsize = 2; break; + case 'i4': this.name = 'int32'; this.itemsize = 4; break; + case 'i8': this.name = 'int64'; this.itemsize = 8; break; + case 'u1': this.name = 'uint8'; this.itemsize = 1; break; + case 'u2': this.name = 'uint16'; this.itemsize = 2; break; + case 'u4': this.name = 'uint32'; this.itemsize = 4; break; + case 'u8': this.name = 'uint64'; this.itemsize = 8; break; + case 'f4': this.name = 'float32'; this.itemsize = 4; break; + case 'f8': this.name = 'float64'; this.itemsize = 8; break; + default: + if (obj.startsWith('V')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'void' + (this.itemsize * 8).toString(); + } + else if (obj.startsWith('O')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'object'; + } + else if (obj.startsWith('S')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'string'; + } + else if (obj.startsWith('U')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'string'; + } + else if (obj.startsWith('M')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'datetime'; + } + else { + throw new chainer.Error("Unknown dtype '" + obj.toString() + "'."); + } + break; + } + this.align 
= align; + this.copy = copy; + this.__setstate__ = function(state) { + switch (state.length) { + case 8: + this.version = state[0]; + this.byteorder = state[1]; + this.subarray = state[2]; + this.names = state[3]; + this.fields = state[4]; + this.elsize = state[5]; + this.alignment = state[6]; + this.int_dtypeflags = state[7]; + break; + default: + throw new chainer.Error("Unknown numpy.dtype setstate length '" + state.length.toString() + "'."); + } + }; + }); + const function_call = (name, args) => { + if (functionTable.has(name)) { + const func = functionTable.get(name); + return func.apply(null, args); + } + const obj = { __type__: name }; + if (constructorTable.has(name)) { + const constructor = constructorTable.get(name); + constructor.apply(obj, args); + } + else { + throw new chainer.Error("Unknown function '" + name + "'."); + } + return obj; + }; + + const dataTypeMap = new Map([ + [ 'i1', 'int8'], [ 'i2', 'int16' ], [ 'i4', 'int32'], [ 'i8', 'int64' ], + [ 'u1', 'uint8'], [ 'u2', 'uint16' ], [ 'u4', 'uint32'], [ 'u8', 'uint64' ], + [ 'f2', 'float16'], [ 'f4', 'float32' ], [ 'f8', 'float64'] + ]); + + for (const entry of context.entries('zip')) { + if (!entry.name.endsWith('.npy')) { + throw new chainer.Error("Invalid file name '" + entry.name + "'."); + } + const id = entry.name.replace(/\.npy$/, ''); + const parts = id.split('/'); + if (parts.length < 2) { + throw new chainer.Error("Invalid parameter name '" + entry.name + "'."); + } + const parameterName = parts.pop(); + const moduleName = parts.join('/'); + if (!modulesMap.has(moduleName)) { + const newModule = { name: moduleName, parameters: [] }; + modules.push(newModule); + modulesMap.set(moduleName, newModule); + } + const module = modulesMap.get(moduleName); + let array = new numpy.Array(entry.data); + if (array.byteOrder === '|') { + if (array.dataType !== 'O') { + throw new chainer.Error("Invalid data type '" + array.dataType + "'."); + } + const unpickler = new pickle.Unpickler(array.data); + 
const root = unpickler.load(function_call); + array = { dataType: root.dtype.name, shape: null, data: null, byteOrder: '|' }; + } + + module.parameters.push({ + name: parameterName, + dataType: dataTypeMap.has(array.dataType) ? dataTypeMap.get(array.dataType) : array.dataType, + shape: array.shape, + data: array.data, + byteOrder: array.byteOrder + }); + } + return new chainer.Model(modules, 'Chainer NumPy'); + } + catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new chainer.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } + + _openHdf5(context, host) { + const identifier = context.identifier; + return host.require('./hdf5').then((hdf5) => { + try { + const file = new hdf5.File(context.buffer); + let rootGroup = file.rootGroup; + if (Object.keys(rootGroup.attributes).length !== 0 || rootGroup.value !== null) { + throw new chainer.Error('File format is not Chainer HDF5'); + } + let format = null; + const modules = []; + const modulesMap = new Map(); + if (Object.keys(rootGroup.attributes).length === 0 && rootGroup.value === null && + rootGroup.groups.length == 1 && rootGroup.groups[0] && + Object.keys(rootGroup.groups[0].attributes).length === 0 && rootGroup.groups[0].value === null) { + rootGroup = rootGroup.groups[0]; + format = 'Weights HDF5'; + } + if (rootGroup.groups.every((moduleGroup) => Object.keys(moduleGroup.attributes).length === 0 && moduleGroup.value === null)) { + format = format || 'Chainer HDF5'; + for (const moduleGroup of rootGroup.groups) { + const moduleName = moduleGroup.attributes.name || moduleGroup.name; + if (!modulesMap.has(moduleName)) { + const newModule = { name: moduleName, parameters: [] }; + modulesMap.set(moduleName, newModule); + modules.push(newModule); + } + const module = modulesMap.get(moduleName); + for (const variableGroup of moduleGroup.groups) { + if (Object.keys(variableGroup.attributes).length !== 0 || variableGroup.groups.length !== 
0) { + throw new chainer.Error('Variable format is not Chainer HDF5'); + } + const variable = variableGroup.value; + if (!variable) { + throw new chainer.Error('Variable value is not Chainer HDF5'); + } + module.parameters.push({ + name: variableGroup.name, + dataType: variable.type, + byteOrder: variable.littleEndian ? '<' : '>', + shape: variable.shape, + data: variable.data, + }); + } + } + } + else if (rootGroup.groups.every((group) => group.value === null && group.groups.every((variable) => Object.keys(variable.attributes).length === 0 && variable.value !== null))) { + format = format || 'Weights HDF5'; + for (const group of rootGroup.groups) { + const moduleName = group.attributes.name || group.name; + if (!modulesMap.has(moduleName)) { + const newModule = { name: moduleName, parameters: [] }; + modulesMap.set(moduleName, newModule); + modules.push(newModule); + } + const module = modulesMap.get(moduleName); + for (const variableGroup of group.groups) { + if (Object.keys(variableGroup.attributes).length !== 0 || variableGroup.groups.length !== 0) { + throw new chainer.Error('Variable format is not Chainer HDF5'); + } + const variable = variableGroup.value; + if (!variable) { + throw new chainer.Error('Variable value is not Chainer HDF5'); + } + module.parameters.push({ + name: variableGroup.name, + dataType: variable.type, + byteOrder: variable.littleEndian ? '<' : '>', + shape: variable.shape, + data: variable.data, + }); + } + } + } + else { + throw new chainer.Error('Module group format is not Chainer HDF5'); + } + + return new chainer.Model(modules, format); + } + catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new chainer.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } +}; + +chainer.Model = class { + + constructor(modules, format) { + this._format = format; + this._graphs = []; + this._graphs.push(new chainer.Graph(modules)); + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +chainer.Graph = class { + + constructor(modules) { + this._nodes = []; + for (const module of modules) { + this._nodes.push(new chainer.Node(module)); + } + } + + get inputs() { + return []; + } + + get outputs() { + return []; + } + + get nodes() { + return this._nodes; + } +}; + +chainer.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +chainer.Argument = class { + + constructor(name, initializer) { + if (typeof name !== 'string') { + throw new chainer.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + return this._initializer.type; + } + + get initializer() { + return this._initializer; + } +}; + +chainer.Node = class { + + constructor(module) { + this._name = module.name; + this._inputs = []; + for (const parameter of module.parameters) { + const name = [ this._name, parameter.name ].join('/'); + const initializer = new chainer.Tensor(name, parameter.dataType, parameter.shape, parameter.data, parameter.byteOrder); + this._inputs.push(new chainer.Parameter(parameter.name, [ + new chainer.Argument(name, initializer) + ])); + } + } + + get type() { + return 'Module'; + } + + get name() { + return this._name; + } + + get metadata() { + return null; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return []; + } + + 
get attributes() { + return []; + } +}; + +chainer.Tensor = class { + + constructor(name, dataType, shape, data, byteOrder) { + this._name = name; + this._type = new chainer.TensorType(dataType, new chainer.TensorShape(shape)); + this._shape = shape; + this._data = data; + this._byteOrder = byteOrder; + } + + get kind() { + return ''; + } + + get name() { + return this._name; + } + + get type(){ + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return chainer.Tensor._stringify(value, '', ' '); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + if (this._byteOrder !== '<' && this._byteOrder !== '>') { + context.state = 'Tensor byte order is not supported.'; + return context; + } + if (this._reference) { + context.state = 'Tensor reference not implemented.'; + return context; + } + if (!this._data || this._data.length == 0) { + context.state = 'Tensor data is empty.'; + return context; + } + switch (this._type.dataType) { + case 'float16': + context.itemSize = 2; + break; + case 'float32': + context.itemSize = 4; + break; + case 'float64': + context.itemSize = 8; + break; + case 'int8': + context.itemSize = 1; + break; + case 'int16': + context.itemSize = 2; + break; + case 'int32': + context.itemSize = 4; + break; + case 'int64': + context.itemSize = 8; + break; + case 'uint8': + context.itemSize = 1; + break; + case 'uint16': + context.itemSize = 2; + break; + case 'uint32': + context.itemSize = 4; + break; + default: + context.state = 'Tensor data type is not supported.'; + return context; + } + context.dimensions = this._type.shape.dimensions; 
+ context.dataType = this._type.dataType; + context.littleEndian = this._byteOrder == '<'; + context.data = this._data; + context.rawData = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + return context; + } + + _decode(context, dimension) { + const littleEndian = context.littleEndian; + const shape = context.dimensions.length == 0 ? [ 1 ] : context.dimensions; + const results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + if (context.rawData) { + switch (context.dataType) { + case 'float16': + results.push(context.rawData.getFloat16(context.index, littleEndian)); + break; + case 'float32': + results.push(context.rawData.getFloat32(context.index, littleEndian)); + break; + case 'float64': + results.push(context.rawData.getFloat64(context.index, littleEndian)); + break; + case 'int8': + results.push(context.rawData.getInt8(context.index, littleEndian)); + break; + case 'int16': + results.push(context.rawData.getInt16(context.index, littleEndian)); + break; + case 'int32': + results.push(context.rawData.getInt32(context.index, littleEndian)); + break; + case 'int64': + results.push(long.Long.fromBytes(context.data.subarray(context.index, context.index + 8), true, littleEndian)); + break; + case 'uint8': + results.push(context.rawData.getUint8(context.index, littleEndian)); + break; + case 'uint16': + results.push(context.rawData.getUint16(context.index, littleEndian)); + break; + case 'uint32': + results.push(context.rawData.getUint32(context.index, littleEndian)); + break; + } + context.index += context.itemSize; + context.count++; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.dimensions.length == 0) { + return 
results[0]; + } + return results; + } + + static _stringify(value, indentation, indent) { + if (Array.isArray(value)) { + const result = []; + result.push(indentation + '['); + const items = value.map((item) => chainer.Tensor._stringify(item, indentation + indent, indent)); + if (items.length > 0) { + result.push(items.join(',\n')); + } + result.push(indentation + ']'); + return result.join('\n'); + } + if (typeof value == 'string') { + return indentation + value; + } + if (value == Infinity) { + return indentation + 'Infinity'; + } + if (value == -Infinity) { + return indentation + '-Infinity'; + } + if (isNaN(value)) { + return indentation + 'NaN'; + } + return indentation + value.toString(); + } +}; + +chainer.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType || '?'; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +chainer.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.join(',') + ']'; + } +}; + +chainer.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Chainer model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = chainer.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/cntk-metadata.json b/frontend/packages/core/public/netron/cntk-metadata.json new file mode 100644 index 00000000..9426cea9 --- /dev/null +++ b/frontend/packages/core/public/netron/cntk-metadata.json @@ -0,0 +1,1170 @@ +[ + { + "name": "Negate", + "schema": { + "operator": 0, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + 
} + }, + { + "name": "Sigmoid", + "schema": { + "category": "Activation", + "operator": 1, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Tanh", + "schema": { + "category": "Activation", + "operator": 2, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ReLU", + "schema": { + "category": "Activation", + "operator": 3, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "RectifiedLinear", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Exp", + "schema": { + "operator": 4, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Log", + "schema": { + "operator": 5, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Sqrt", + "schema": { + "operator": 6, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Floor", + "schema": { + "operator": 7, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Abs", + "schema": { + "operator": 8 + } + }, + { + "name": "Reciprocal", + "schema": { + "operator": 9, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Softmax", + "schema": { + "category": "Activation", + "operator": 10, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Hardmax", + "schema": { + "category": "Activation", + "operator": 11 + } + }, + { + "name": "TransposeAxes", + "schema": { + "category": "Activation", + "operator": 12 + } + }, + { + "name": "TransposeDimensions", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + 
{ + "name": "Where", + "schema": { + "operator": 13, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Slice", + "schema": { + "category": "Tensor", + "operator": 14, + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Dropout", + "schema": { + "category": "Dropout", + "operator": 15, + "attributes": [ + { "name": "rngSeed", "visible": false }, + { "name": "rngOffset", "visible": false } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Reshape", + "schema": { + "category": "Shape", + "operator": 16, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "MaxPooling", + "schema": { + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "AveragePooling", + "schema": { + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Pooling", + "schema": { + "category": "Pool", + "operator": 17, + "attributes": [ + { "name": "transpose", "default": false }, + { "name": "includePad", "default": false }, + { "name": "ceilOutDim", "default": false }, + { "name": "autoPadding", "default": [ false, null ] }, + { "name": "sharing", "default": [ true, null] }, + { "name": "strides", "default": [ 1, null ] }, + { "name": "lowerPad", "default": [ 0, null ] }, + { "name": "upperPad", "default": [ 0, null ] }, + { "name": "outputShape", "default": 0 }, + { "name": "maxTempMemSizeInSamples", "default": 0 }, + { "name": "poolingType", "type": "PoolingType", "default": "Max" }, + { "name": "poolKind", "type": "PoolKind", "default": "None" }, + { "name": "imageLayoutKind", "type": "ImageLayoutKind", "visible": false } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { 
"name": "output" } + ] + } + }, + { + "name": "SumAll", + "schema": { + "operator": 18 + } + }, + { + "name": "Plus", + "schema": { + "operator": 19, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "Minus", + "schema": { + "operator": 20, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "ElementTimes", + "schema": { + "operator": 21, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "Equal", + "schema": { + "operator": 22, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "NotEqual", + "schema": { + "operator": 23, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "Less", + "schema": { + "operator": 24, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "LessEqual", + "schema": { + "operator": 25, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "Greater", + "schema": { + "operator": 26, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "GreaterEqual", + "schema": { + "operator": 27, + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "PackedIndex", + "schema": { + "operator": 28, + "inputs": [ + { "name": "source" }, + { "name": "index" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "GatherPacked", + "schema": { + "operator": 29, + "inputs": [ + { "name": "index" }, + { "name": "source" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ScatterPacked", + "schema": { + "operator": 30 + } + }, + { + "name": "Times", + "schema": { + 
"operator": 31, + "attributes": [ + { "name": "outputRank", "default": 1 }, + { "name": "inferInputRankToMap", "visible": false, "default": -1 } + ], + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "TransposeTimes", + "schema": { + "operator": 32 + } + }, + { + "name": "Convolution", + "schema": { + "category": "Layer", + "operator": 33, + "attributes": [ + { "name": "transpose", "default": false }, + { "name": "maxTempMemSizeInSamples", "default": 0 }, + { "name": "dilation", "default": [ 1, null] }, + { "name": "outputShape", "default": 0 }, + { "name": "sharing", "default": [ true, null] }, + { "name": "strides", "default": [ 1, null ] }, + { "name": "includePad", "default": false }, + { "name": "ceilOutDim", "default": false }, + { "name": "autoPadding", "default": [ true, null ] }, + { "name": "lowerPad", "default": [ 0, null ] }, + { "name": "upperPad", "default": [ 0, null ] }, + { "name": "convolution2D", "visible": false }, + { "name": "poolKind", "type": "PoolKind", "default": "None" }, + { "name": "imageLayoutKind", "type": "ImageLayoutKind", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "W" }, + { "name": "b" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "SquaredError", + "schema": { + "operator": 34 + } + }, + { + "name": "CrossEntropyWithSoftmax", + "schema": { + "operator": 35, + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ClassificationError", + "schema": { + "operator": 36, + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "PastValue", + "schema": { + "operator": 37, + "attributes": [ + { "name": "offset", "type": "uint32", "default": 1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "initialState" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "FutureValue", + "schema": { + "operator": 38, + "attributes": [ + { "name": "offset", "type": "uint32", "default": 1 } 
+ ], + "inputs": [ + { "name": "input" }, + { "name": "initialState" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ReduceElements", + "schema": { + "operator": 39, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "BatchNormalization", + "schema": { + "category": "Normalization", + "operator": 40, + "attributes": [ + { "name": "disableRegularization", "default": false }, + { "name": "useCuDNNEngine", "visible": false }, + { "name": "useCntkEngine", "visible": false }, + { "name": "runCountUntied", "visible": false }, + { "name": "epsilon", "default": 0.00001 }, + { "name": "normalizationTimeConstant", "default": 0 }, + { "name": "disableRegularization", "default": false }, + { "name": "blendTimeConstant", "default": 0 }, + { "name": "imageLayoutKind", "type": "ImageLayoutKind", "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "count" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Clip", + "schema": { + "operator": 41 + } + }, + { + "name": "Select", + "schema": { + "operator": 42 + } + }, + { + "name": "Splice", + "schema": { + "category": "Tensor", + "operator": 43, + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Combine", + "schema": { + "category": "Tensor", + "operator": 44, + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "RandomSample", + "schema": { + "operator": 45 + } + }, + { + "name": "RandomSampleInclusionFrequency", + "schema": { + "operator": 46 + } + }, + { + "name": "ROIPooling", + "schema": { + "operator": 47, + "category": "Pool", + "attributes": [ + { "name": "spatialScale", "default": 0.0625 }, + { "name": "poolKind", "type": "PoolKind", "default": "None" } + ], + 
"inputs": [ + { "name": "inputs" }, + { "name": "ROIs" } + ], + "outputs": [ + { "name": "outputs" } + ] + } + }, + { + "name": "Logistic", + "schema": { + "operator": 48 + } + }, + { + "name": "OptimizedRNNStack", + "schema": { + "operator": 49 + } + }, + { + "name": "ReconcileDynamicAxis", + "schema": { + "operator": 50 + } + }, + { + "name": "LogSoftmax", + "schema": { + "operator": 51 + } + }, + { + "name": "LogPlus", + "schema": { + "operator": 52 + } + }, + { + "name": "CosDistance", + "schema": { + "operator": 53 + } + }, + { + "name": "Sin", + "schema": { + "operator": 54 + } + }, + { + "name": "Cos", + "schema": { + "operator": 55 + } + }, + { + "name": "Pass", + "schema": { + "operator": 56 + } + }, + { + "name": "Block", + "schema": { + "operator": 57 + } + }, + { + "name": "Unpooling", + "schema": { + "operator": 58 + } + }, + { + "name": "LambdaRank", + "schema": { + "operator": 59 + } + }, + { + "name": "NDCG", + "schema": { + "operator": 60 + } + }, + { + "name": "EditDistanceError", + "schema": { + "operator": 61 + } + }, + { + "name": "NoOp", + "schema": { + "operator": 62, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "LabelsToGraph", + "schema": { + "operator": 63 + } + }, + { + "name": "StopGradient", + "schema": { + "operator": 64 + } + }, + { + "name": "ELU", + "schema": { + "operator": 65 + } + }, + { + "name": "ForwardBackward", + "schema": { + "operator": 66 + } + }, + { + "name": "CosDistanceWithNegativeSamples", + "schema": { + "operator": 67 + } + }, + { + "name": "OneHot", + "schema": { + "operator": 68 + } + }, + { + "name": "Pow", + "schema": { + "operator": 69 + } + }, + { + "name": "ToSequence", + "schema": { + "operator": 70 + } + }, + { + "name": "ToSequenceLike", + "schema": { + "operator": 71 + } + }, + { + "name": "UnpackSequence", + "schema": { + "operator": 72 + } + }, + { + "name": "Assign", + "schema": { + "operator": 73 + } + }, + { + "name": "Gather", + "schema": 
{ + "operator": 74 + } + }, + { + "name": "StableSigmoid", + "schema": { + "operator": 75, + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "RandomDistribution", + "schema": { + "operator": 76 + } + }, + { + "name": "Sinh", + "schema": { + "operator": 77 + } + }, + { + "name": "Cosh", + "schema": { + "operator": 78 + } + }, + { + "name": "UnpackBatch", + "schema": { + "operator": 79 + } + }, + { + "name": "ToBatch", + "schema": { + "operator": 80 + } + }, + { + "name": "Asin", + "schema": { + "operator": 81 + } + }, + { + "name": "Acos", + "schema": { + "operator": 82 + } + }, + { + "name": "Pad", + "schema": { + "operator": 83, + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Crop", + "schema": { + "operator": 84, + "category": "Data" + } + }, + { + "name": "Atanh", + "schema": { + "operator": 85 + } + }, + { + "name": "Asinh", + "schema": { + "operator": 86 + } + }, + { + "name": "TopK", + "schema": { + "operator": 87, + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Squeeze", + "schema": { + "operator": 88, + "category": "Transform" + } + }, + { + "name": "ConstantOp", + "schema": { + "operator": 89 + } + }, + { + "name": "LatticeSequenceWithSoftmax", + "schema": { + "operator": 90 + } + }, + { + "name": "Cast", + "schema": { + "operator": 91 + } + }, + { + "name": "EyeLikeOp", + "schema": { + "operator": 92 + } + }, + { + "name": "CustomProxyOp", + "schema": { + "operator": 93 + } + }, + { + "name": "StraightThrough", + "schema": { + "operator": 94 + } + }, + { + "name": "Tan", + "schema": { + "operator": 95 + } + }, + { + "name": "Atan", + "schema": { + "operator": 96 + } + }, + { + "name": "ConvolutionSequenceShape", + "schema": { + "operator": 97 + } + }, + { + "name": "Function:Dense", + "schema": { + "category": "Layer", + "inputs": [ + 
{ "name": "input" }, + { "name": "W" }, + { "name": "b" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:Convolution", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:ConvolutionTranspose", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:Softmax", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:linear", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "b" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:lrn", + "schema": { + "category": "Normalization", + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:PReLU", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "x" }, + { "name": "axis" }, + { "name": "slope" } + ], + "outputs": [ + { "name": "y" } + ] + } + }, + + + { + "name": "Function:ElementDivide", + "schema": { + "inputs": [ + { "name": "A" }, + { "name": "B" } + ], + "outputs": [ + { "name": "C" } + ] + } + }, + { + "name": "Function:BatchNormalization", + "schema": { + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "scale" }, + { "name": "bias" }, + { "name": "mean" }, + { "name": "variance" }, + { "name": "count" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:MaxPooling", + "schema": { + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:AveragePooling", + "schema": { + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:Dropout", + "schema": { + 
"category": "Dropout", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Function:LSTM", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "0" }, + { "name": "1" }, + { "name": "2" }, + { "name": "b" }, + { "name": "W" }, + { "name": "H" } + ] + } + }, + { + "name": "Mean", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "InvStdDev", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/cntk-proto.js b/frontend/packages/core/public/netron/cntk-proto.js new file mode 100644 index 00000000..61847ac2 --- /dev/null +++ b/frontend/packages/core/public/netron/cntk-proto.js @@ -0,0 +1,532 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.cntk || ($protobuf.roots.cntk = {}); + + $root.CNTK = (function() { + + var CNTK = {}; + + CNTK.proto = (function() { + + var proto = {}; + + proto.NDShape = (function() { + + function NDShape(properties) { + this.shape_dim = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NDShape.prototype.shape_dim = $util.emptyArray; + + NDShape.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CNTK.proto.NDShape(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shape_dim && message.shape_dim.length)) + message.shape_dim = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shape_dim.push(reader.uint64()); + } else + message.shape_dim.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NDShape; + })(); + + proto.Axis = (function() { + + function Axis(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Axis.prototype.static_axis_idx = 0; + Axis.prototype.name = ""; + Axis.prototype.is_ordered_dynamic_axis = false; + + Axis.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CNTK.proto.Axis(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.static_axis_idx = reader.int32(); + break; + case 2: + message.name = reader.string(); + break; + case 3: + message.is_ordered_dynamic_axis = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Axis; + })(); + + proto.NDArrayView = (function() { + + function NDArrayView(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NDArrayView.prototype.data_type = 0; + NDArrayView.prototype.storage_format = 0; + NDArrayView.prototype.shape = null; + NDArrayView.prototype.float_values = null; + NDArrayView.prototype.double_values = null; + NDArrayView.prototype.bytes_value = null; + NDArrayView.prototype.sint32_values = null; + + var $oneOfFields; + + Object.defineProperty(NDArrayView.prototype, "values", { + get: $util.oneOfGetter($oneOfFields = ["float_values", "double_values", "bytes_value", "sint32_values"]), + set: $util.oneOfSetter($oneOfFields) + }); + + NDArrayView.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CNTK.proto.NDArrayView(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.data_type = reader.int32(); + break; + case 2: + message.storage_format = reader.int32(); + break; + case 3: + message.shape = $root.CNTK.proto.NDShape.decode(reader, reader.uint32()); + break; + case 4: + message.float_values = $root.CNTK.proto.NDArrayView.FloatValues.decode(reader, reader.uint32()); + break; + case 5: + message.double_values = $root.CNTK.proto.NDArrayView.DoubleValues.decode(reader, reader.uint32()); + break; + case 6: + message.bytes_value = $root.CNTK.proto.NDArrayView.BytesValue.decode(reader, reader.uint32()); + break; + case 7: + message.sint32_values = $root.CNTK.proto.NDArrayView.IntValues.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NDArrayView.DataType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "Unknown"] = 0; + values[valuesById[1] = "Float"] = 1; + values[valuesById[2] = "Double"] = 2; + values[valuesById[4] = "Float16"] = 4; + values[valuesById[5] = "Int8"] = 5; + values[valuesById[6] = "Int16"] = 6; + return values; + })(); + + NDArrayView.StorageFormat = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "Dense"] = 0; + values[valuesById[1] = "SparseCSC"] = 1; + values[valuesById[2] = "SparseBlockCol"] = 2; + return values; + })(); + + NDArrayView.FloatValues = (function() { + + function FloatValues(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FloatValues.prototype.value = $util.emptyArray; + + FloatValues.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); 
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CNTK.proto.NDArrayView.FloatValues(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + if (message.value.length == 0 && (end2 - reader.pos) > 1048576) { + var valueLength = end2 - reader.pos; + var valueView = new DataView(reader.buf.buffer, reader.buf.byteOffset + reader.pos, valueLength); + valueLength = valueLength >>> 2; + var value = new Float32Array(valueLength); + for (var i = 0; i < valueLength; i++) { + value[i] = valueView.getFloat32(i << 2, true); + } + message.value = value; + reader.pos = end2; + } + else { + while (reader.pos < end2) + message.value.push(reader.float()); + } + } else + message.value.push(reader.float()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FloatValues; + })(); + + NDArrayView.DoubleValues = (function() { + + function DoubleValues(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DoubleValues.prototype.value = $util.emptyArray; + + DoubleValues.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CNTK.proto.NDArrayView.DoubleValues(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.value.push(reader.double()); + } else + message.value.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DoubleValues; + })(); + + NDArrayView.BytesValue = (function() { + + function BytesValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BytesValue.prototype.value = $util.newBuffer([]); + + BytesValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CNTK.proto.NDArrayView.BytesValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BytesValue; + })(); + + NDArrayView.IntValues = (function() { + + function IntValues(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + IntValues.prototype.value = $util.emptyArray; + + IntValues.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CNTK.proto.NDArrayView.IntValues(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.value.push(reader.sint32()); + } else + message.value.push(reader.sint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return IntValues; + })(); + + return NDArrayView; + })(); + + proto.Vector = (function() { + + function Vector(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Vector.prototype.value = $util.emptyArray; + + Vector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CNTK.proto.Vector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + message.value.push($root.CNTK.proto.DictionaryValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Vector; + })(); + + proto.Dictionary = (function() { + + function Dictionary(properties) { + this.data = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Dictionary.prototype.version = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + Dictionary.prototype.data = $util.emptyObject; + + Dictionary.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CNTK.proto.Dictionary(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.uint64(); + break; + case 2: + reader.skip().pos++; + if (message.data === $util.emptyObject) + message.data = {}; + key = reader.string(); + reader.pos++; + message.data[key] = $root.CNTK.proto.DictionaryValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Dictionary; + })(); + + proto.DictionaryValue = (function() { + + function DictionaryValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DictionaryValue.prototype.version = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + DictionaryValue.prototype.value_type = 0; + DictionaryValue.prototype.bool_value = false; + DictionaryValue.prototype.int_value = 0; + DictionaryValue.prototype.size_t_value = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + DictionaryValue.prototype.float_value = 0; + DictionaryValue.prototype.double_value = 0; + DictionaryValue.prototype.string_value = ""; + DictionaryValue.prototype.nd_shape_value = null; + DictionaryValue.prototype.axis_value = null; + DictionaryValue.prototype.vector_value = null; + DictionaryValue.prototype.dictionary_value = null; + DictionaryValue.prototype.nd_array_view_value = null; + + var $oneOfFields; + + Object.defineProperty(DictionaryValue.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["bool_value", "int_value", "size_t_value", "float_value", "double_value", "string_value", "nd_shape_value", "axis_value", "vector_value", "dictionary_value", "nd_array_view_value"]), + set: $util.oneOfSetter($oneOfFields) + }); + + DictionaryValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CNTK.proto.DictionaryValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.uint64(); + break; + case 2: + message.value_type = reader.int32(); + break; + case 3: + message.bool_value = reader.bool(); + break; + case 4: + message.int_value = reader.int32(); + break; + case 5: + message.size_t_value = reader.uint64(); + break; + case 6: + message.float_value = reader.float(); + break; + case 7: + message.double_value = reader.double(); + break; + case 8: + message.string_value = reader.string(); + break; + case 9: + message.nd_shape_value = $root.CNTK.proto.NDShape.decode(reader, reader.uint32()); + break; + case 10: + message.axis_value = $root.CNTK.proto.Axis.decode(reader, reader.uint32()); + break; + case 11: + message.vector_value = $root.CNTK.proto.Vector.decode(reader, reader.uint32()); + break; + case 12: + message.dictionary_value = $root.CNTK.proto.Dictionary.decode(reader, reader.uint32()); + 
break; + case 13: + message.nd_array_view_value = $root.CNTK.proto.NDArrayView.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DictionaryValue.Type = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "None"] = 0; + values[valuesById[1] = "Bool"] = 1; + values[valuesById[2] = "Int"] = 2; + values[valuesById[3] = "SizeT"] = 3; + values[valuesById[4] = "Float"] = 4; + values[valuesById[5] = "Double"] = 5; + values[valuesById[6] = "String"] = 6; + values[valuesById[7] = "NDShape"] = 7; + values[valuesById[8] = "Axis"] = 8; + values[valuesById[9] = "Vector"] = 9; + values[valuesById[10] = "Dictionary"] = 10; + values[valuesById[11] = "NDArrayView"] = 11; + return values; + })(); + + return DictionaryValue; + })(); + + return proto; + })(); + + return CNTK; + })(); + + return $root; +})(protobuf); diff --git a/frontend/packages/core/public/netron/cntk.js b/frontend/packages/core/public/netron/cntk.js new file mode 100644 index 00000000..96cad188 --- /dev/null +++ b/frontend/packages/core/public/netron/cntk.js @@ -0,0 +1,1420 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var cntk = cntk || {}; +var long = long || { Long: require('long') }; +var protobuf = protobuf || require('protobufjs'); + +var cntk_v1 = {}; +var cntk_v2 = null; + +cntk.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (extension == 'model' || extension == 'cmf' || extension == 'dnn' || extension == 'cntk') { + const buffer = context.buffer; + // Reject PyTorch models with .model file extension. 
+ const torch = [ 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ]; + if (buffer && buffer.length > 14 && buffer[0] == 0x80 && torch.every((v, i) => v == buffer[i + 2])) { + return false; + } + // CNTK v1 + if (buffer && buffer.length >= 8 && + buffer[0] == 0x42 && buffer[1] == 0x00 && buffer[2] == 0x43 && buffer[3] == 0x00 && + buffer[4] == 0x4E && buffer[5] == 0x00 && buffer[6] == 0x00 && buffer[7] == 0x00) { + return true; + } + // CNTK v2 + const tags = context.tags('pb'); + if (tags.get(1) === 0 && tags.get(2) === 2) { + return true; + } + return false; + } + } + + open(context, host) { + return host.require('./cntk-proto').then(() => { + let version = 0; + let obj = null; + try { + const buffer = context.buffer; + if (buffer && buffer.length >= 8 && + buffer[0] == 0x42 && buffer[1] == 0x00 && buffer[2] == 0x43 && buffer[3] == 0x00 && + buffer[4] == 0x4E && buffer[5] == 0x00 && buffer[6] == 0x00 && buffer[7] == 0x00) { + obj = new cntk_v1.ComputationNetwork(buffer); + version = 1; + } + } + catch (error) { + throw new cntk.Error("File format is not CNTK v1 (" + error.message + ") in '" + context.identifier + "'."); + } + try { + if (!obj) { + cntk_v2 = protobuf.roots.cntk.CNTK.proto; + cntk_v2.PoolingType = { 0: 'Max', 1: 'Average' }; + const dictionary = cntk_v2.Dictionary.decode(context.buffer); + obj = cntk.ModelFactory._convertDictionary(dictionary); + version = 2; + } + } + catch (error) { + throw new cntk.Error("File format is not cntk.Dictionary (" + error.message + ") in '" + context.identifier + "'."); + } + return cntk.Metadata.open(host).then((metadata) => { + try { + return new cntk.Model(metadata, version, obj); + } + catch (error) { + throw new cntk.Error(error.message); + } + }); + }); + } + + static _convertDictionary(dictionary) { + const target = {}; + for (const key of Object.keys(dictionary.data).filter((key) => key != 'version')) { + target[key] = cntk.ModelFactory._convertDictionaryValue(dictionary.data[key]); + } 
+ return target; + } + + static _convertDictionaryValue(dictionaryValue) { + switch (dictionaryValue.value_type) { + case cntk_v2.DictionaryValue.Type.Bool: + return dictionaryValue.bool_value; + case cntk_v2.DictionaryValue.Type.Int: + return dictionaryValue.int_value; + case cntk_v2.DictionaryValue.Type.SizeT: + return dictionaryValue.size_t_value; + case cntk_v2.DictionaryValue.Type.Float: + return dictionaryValue.float_value; + case cntk_v2.DictionaryValue.Type.Double: + return dictionaryValue.double_value; + case cntk_v2.DictionaryValue.Type.String: + return dictionaryValue.string_value; + case cntk_v2.DictionaryValue.Type.Vector: + return cntk.ModelFactory._convertVectorValue(dictionaryValue.vector_value); + case cntk_v2.DictionaryValue.Type.NDShape: + return dictionaryValue.nd_shape_value; + case cntk_v2.DictionaryValue.Type.Axis: + return dictionaryValue.axis_value; + case cntk_v2.DictionaryValue.Type.Dictionary: + return cntk.ModelFactory._convertDictionary(dictionaryValue.dictionary_value); + case cntk_v2.DictionaryValue.Type.NDArrayView: + return dictionaryValue.nd_array_view_value; + } + throw new cntk.Error("Unknown dictionary value type '" + dictionaryValue.value_type.toString() + "'."); + } + + static _convertVectorValue(vectorValue) { + return vectorValue.value.map((item) => { + return cntk.ModelFactory._convertDictionaryValue(item); + }); + } +}; + +cntk.Model = class { + + constructor(metadata, version, obj) { + switch (version) { + case 1: + this._format = 'CNTK v1' + (obj.version ? ('.' 
+ obj.version.toString()) : ''); + break; + case 2: + this._format = 'CNTK v2'; + break; + } + this._graphs = []; + this._graphs.push(new cntk.Graph(metadata, version, obj)); + } + + get graphs() { + return this._graphs; + } + + get format() { + return this._format; + } +}; + +cntk.Graph = class { + + constructor(metadata, version, obj) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._functions = []; + + const args = {}; + switch (version) { + case 1: { + for (const name of Object.keys(obj.nodes)) { + const node = obj.nodes[name]; + switch (node.__type__) { + case 'InputValue': + this._inputs.push(new cntk.Parameter(node.name, [ + new cntk.Argument(version, node) + ])); + break; + case 'LearnableParameter': + args[node.name] = new cntk.Argument(version, node); + break; + } + } + for (const name of Object.keys(obj.nodes)) { + const node = obj.nodes[name]; + if (node.__type__ != 'InputValue' && node.__type__ != 'LearnableParameter') { + this._nodes.push(new cntk.Node(metadata, version, node, args)); + } + } + if (obj.output) { + for (const output of obj.output) { + this._outputs.push(new cntk.Parameter(output, [ + new cntk.Argument(version, output) + ])); + } + } + break; + } + case 2: { + const nodeMap = new Map(); + for (const node of obj.primitive_functions) { + nodeMap.set(node.uid, node); + } + for (const input of obj.inputs) { + const argument = new cntk.Argument(version, input); + args[input.uid] = argument; + // VariableKind { 0: 'input', 1: 'output', 2: 'parameter', 3: 'constant', 4: 'placeholder' } + if (input.kind == 0) { + const inputName = input.name || input.uid; + this._inputs.push(new cntk.Parameter(inputName, [ argument ])); + } + } + for (const block of obj.primitive_functions) { + if (block.op == 57 && block.block_function_composite) { + const list = [ block.block_function_composite.root ]; + const nodes = []; + while (list.length > 0) { + const name = list.shift(); + if (nodeMap.has(name)) { + const node = 
nodeMap.get(name); + nodes.push(new cntk.Node(metadata, version, node, args)); + nodeMap.delete(name); + for (let i = 0; i < node.inputs.length; i++) { + const parts = node.inputs[i].split('_'); + if (parts.length >= 3) { + parts.pop(); + if (parts.pop() == 'Output') { + list.push(parts.join('_')); + } + } + } + } + } + const inputs = []; + const outputs = [ block.block_function_composite.root ]; + this._functions.push(new cntk.Function(block.block_function_op_name, nodes, inputs, outputs)); + } + } + for (const node of obj.primitive_functions) { + if (nodeMap.has(node.uid)) { + this._nodes.push(new cntk.Node(metadata, version, node, args)); + } + } + break; + } + default: + throw new cntk.Error("Unsupported graph version '" + version + "'."); + } + } + + get nodes() { + return this._nodes; + } + + get functions() { + return this._functions; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } +}; + +cntk.Function = class { + + constructor(name, nodes, inputs, outputs) { + this._name = name; + this._inputs = inputs; + this._outputs = outputs; + this._nodes = nodes; + } + + get name() { + return this._name; + } + + get description() { + return ''; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +cntk.Parameter = class { + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +cntk.Argument = class { + + constructor(version, obj) { + if (typeof obj === 'string') { + this._name = obj; + } + else { + switch (version) { + case 1: + switch (obj.__type__) { + case 'InputValue': + this._name = obj.name; + this._type = new cntk.TensorType(version, obj.precision, obj.sampleLayout); + this._initializer = null; + break; + case 'LearnableParameter': + this._name = obj.name; + 
this._type = null; + this._initializer = new cntk.Tensor(version, obj); + break; + } + break; + case 2: + if (obj.value) { + this._name = obj.name || obj.uid; + this._type = null; + this._initializer = new cntk.Tensor(version, obj); + } + else { + this._name = obj.uid; + this._type = new cntk.TensorType(version, obj.data_type, obj.shape); + this._initializer = null; + } + break; + } + } + } + + get name() { + return this._name; + } + + get type() { + if (this._type) { + return this._type; + } + if (this._initializer) { + return this._initializer.type; + } + return null; + } + + get description() { + return ''; + } + + get initializer() { + return this._initializer; + } +}; + +cntk.Node = class { + + constructor(metadata, version, obj, args) { + + this._metadata = metadata; + this._attributes = []; + this._inputs = []; + this._outputs = []; + + let inputs = []; + let outputs = []; + const initializers = []; + + switch (version) { + case 1: { + this._type = obj.__type__; + this._name = obj.name; + for (const attributeName of Object.keys(obj)) { + if (attributeName != '__type__' && attributeName != 'name' && attributeName != 'inputs' && attributeName != 'precision') { + this._attributes.push(new cntk.Attribute(metadata.attribute(this._type, attributeName), attributeName, obj[attributeName])); + } + } + inputs = obj.inputs.map((input) => { + if (args[input]) { + return args[input]; + } + return new cntk.Argument(version, input); + }); + outputs = [ new cntk.Argument(version, this._name) ]; + break; + } + case 2: { + this._name = obj.name || obj.uid || null; + const output = obj.uid; + if (obj.op == 57) { + this._type = 'Block'; + if (obj.block_function_op_name) { + this._type = obj.block_function_op_name; + this._function = true; + } + } + else { + if (!Object.prototype.hasOwnProperty.call(obj, 'op')) { + this._type = obj.type; + if (obj.user_defined_state) { + for (const attributeName of Object.keys(obj.user_defined_state)) { + this._attributes.push(new 
cntk.Attribute(metadata.attribute(this._type, attributeName), attributeName, obj.user_defined_state[attributeName])); + } + } + } + else { + this._type = this._metadata.name(obj.op); + if (this.type == null) { + this._type = obj.op ? obj.op.toString() : '?'; + } + } + } + if (obj.attributes) { + for (const attributeName of Object.keys(obj.attributes)) { + this._attributes.push(new cntk.Attribute(metadata.attribute(this._type, attributeName), attributeName, obj.attributes[attributeName])); + } + } + for (const input of obj.inputs) { + const argument = args[input]; + if (argument) { + if (argument.initializer) { + initializers.push(argument); + } + else { + inputs.push(argument); + } + } + else { + inputs.push(new cntk.Argument(version, input)); + } + } + outputs.push(new cntk.Argument(version, output + '_Output_0')); + inputs = inputs.concat(initializers); + break; + } + } + + let inputIndex = 0; + const schema = this._metadata.type(this._function ? ('Function:' + this._type) : this._type); + if (schema && schema.inputs) { + for (const inputSchema of schema.inputs) { + if (inputIndex < inputs.length || inputSchema.option != 'optional') { + const inputCount = (inputSchema.option == 'variadic') ? 
(inputs.length - inputIndex) : 1; + const inputArguments = []; + for (const inputArgument of inputs.slice(inputIndex, inputIndex + inputCount)) { + if (inputArgument.name != '' || inputSchema.option != 'optional') { + inputArguments.push(inputArgument); + } + } + this._inputs.push(new cntk.Parameter(inputSchema.name, inputArguments)); + inputIndex += inputCount; + } + } + } + this._inputs = this._inputs.concat(inputs.slice(inputIndex).map((argument, index) => { + return new cntk.Parameter((inputIndex + index).toString(), [ argument ]); + })); + + let outputIndex = 0; + if (schema && schema.outputs) { + for (const outputSchema of schema.outputs) { + if (outputIndex < outputs.length || outputSchema.option != 'optional') { + const outputCount = (outputSchema.option == 'variadic') ? (outputs.length - outputIndex) : 1; + this._outputs.push(new cntk.Parameter(outputSchema.name, outputs.slice(outputIndex, outputIndex + outputCount))); + outputIndex += outputCount; + } + } + } + this._outputs = this._outputs.concat(outputs.slice(outputIndex).map((argument) => { + return new cntk.Parameter(outputIndex.toString(), [ argument ]); + })); + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get function() { + return this._function || false; + } + + get metadata() { + return this._metadata.type(this._function ? 
('Function:' + this._type) : this._type); + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } +}; + +cntk.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + this._type = null; + if (cntk_v1 && this._value instanceof cntk_v1.TensorShape) { + this._value = new cntk.TensorShape(1, value); + this._type = 'shape'; + } + if (cntk_v2 && this._value instanceof cntk_v2.NDShape) { + this._value = new cntk.TensorShape(2, value); + this._type = 'shape'; + } + if (cntk_v2 && this._value instanceof cntk_v2.Axis) { + const axis = { __type__: 'Axis' }; + for (const key of Object.keys(value).filter((key) => key !== 'name')) { + axis[key] = value[key]; + } + this._value = axis; + } + if (schema) { + if (schema.type) { + this._type = schema.type; + const type = cntk_v1[this._type] || cntk_v2[this._type]; + if (type && type[this._value]) { + this._value = type[this._value]; + } + } + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + let defaultValue = schema.default; + value = this._value; + if (typeof value == 'function') { + value = value(); + } + if (this._type == 'shape') { + value = value.dimensions; + } + if (value == defaultValue) { + this._visible = false; + } + else if (Array.isArray(value) && Array.isArray(defaultValue)) { + defaultValue = defaultValue.slice(0, defaultValue.length); + if (defaultValue.length > 1 && defaultValue[defaultValue.length - 1] == null) { + defaultValue.pop(); + while (defaultValue.length < value.length) { + defaultValue.push(defaultValue[defaultValue.length - 1]); + } + } + if (value.every((item, index) => { return item == defaultValue[index]; })) { + this._visible = false; + } + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return 
this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +cntk.Tensor = class { + + constructor(version, tensor) { + switch (version) { + case 1: + if (tensor.__type__ == 'LearnableParameter') { + this._name = tensor.name || null; + this._type = new cntk.TensorType(version, tensor.precision, tensor.sampleLayout); + } + break; + case 2: + this._name = tensor.name || tensor.uid || null; + this._type = new cntk.TensorType(version, tensor.data_type, tensor.shape); + this._value = tensor.value; + break; + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state || null; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + + if (this._type.dataType == '?') { + context.state = 'Tensor has unknown data type.'; + return context; + } + if (!this._type.shape) { + context.state = 'Tensor has no dimensions.'; + return context; + } + + const value = this._value; + if (!value) { + context.state = 'Tensor data is empty.'; + return context; + } + + switch (this._type.dataType) { + case 'float32': + if (value.float_values && value.float_values.value && value.float_values.value.length > 0) { + context.data = value.float_values.value; + } + else { + context.state = 'Tensor data is empty.'; + } + break; + default: + context.state = 'Tensor data type is not implemented.'; + break; + } + + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + + return context; + 
} + + _decode(context, dimension) { + let shape = context.shape; + if (context.shape.length == 0) { + shape = [ 1 ]; + } + const results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(context.data[context.index++]); + context.count++; + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } +}; + +cntk.TensorType = class { + + constructor(version, dataType, shape) { + this._dataType = '?'; + switch (version) { + case 1: + switch (dataType) { + case 'float': this._dataType = 'float32'; break; + case 'double': this._dataType = 'float64'; break; + case 'half': this._dataType = 'float16'; break; + case '': this._dataType = 'float32'; break; + } + this._shape = new cntk.TensorShape(version, shape); + break; + case 2: + if (long.Long.isLong(dataType)) { + dataType = dataType.toNumber(); + } + switch (dataType) { + case 1: this._dataType = 'float32'; break; + } + this._shape = new cntk.TensorShape(version, shape); + break; + } + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +cntk.TensorShape = class { + + constructor(version, shape) { + switch (version) { + case 1: + this._dimensions = shape.dims; + break; + case 2: + this._dimensions = shape.shape_dim.map((dimension) => { + if (dimension.low == -1 && dimension.high == -1 && dimension.unsigned == true) { + return -1; + } + if (dimension && long.Long.isLong(dimension)) { + return dimension.toNumber(); + } + return dimension; + }); + break; + } + } + + get dimensions() { + return this._dimensions; + } + + toString() { 
+ return (this._dimensions && this._dimensions.length) ? ('[' + this._dimensions.join(',') + ']') : ''; + } +}; + +cntk.Metadata = class { + + static open(host) { + if (cntk.Metadata._metadata) { + return Promise.resolve(cntk.Metadata._metadata); + } + return host.request(null, 'cntk-metadata.json', 'utf-8').then((data) => { + cntk.Metadata._metadata = new cntk.Metadata(data); + return cntk.Metadata._metadata; + }).catch(() => { + cntk.Metadata._metadata = new cntk.Metadata(null); + return cntk.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + this._typeMap = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + const name = item.name; + const schema = item.schema; + schema.name = name; + this._map[name] = schema; + if (Object.prototype.hasOwnProperty.call(schema, 'operator')) { + this._typeMap[schema.operator.toString()] = name; + } + } + } + } + } + } + + name(code) { + // cntk/Source/CNTKv2LibraryDll/API/Internals/PrimitiveOpType.h + return this._typeMap[code] || null; + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + let schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +cntk_v1.ComputationNetwork = class { + + constructor(buffer) { + let reader = new cntk_v1.Reader(buffer); + reader.assert('BCN'); + reader.assert('BVersion'); + this.version = reader.uint64(); + reader.assert('EVersion'); + let numNodes = reader.uint64(); + reader.assert('BNodeList'); + let op = {}; + op.Minus = function() {}; + op.Plus = function() {}; + op.GreaterEqual = function() {}; + op.Equal = function() {}; + op.NotEqual = function() {}; + 
op.GreaterEqual = function() {}; + op.Exp = function() {}; + op.Log = function() {}; + op.Reciprocal = function() {}; + op.ElementTimes = function() {}; + op.ClassificationError = function() {}; + op.RectifiedLinear = function() {}; + op.InputValue = function(reader, version) { + this.rows = reader.uint64(); + this.cols = reader.uint64(); + this.sampleLayout = new cntk_v1.TensorShape(reader, true); + this.dynamicAxisNodeName = ''; + if (version >= 8) { + let nrAxes = reader.uint32(); + if (nrAxes == 1) { + this.dynamicAxisNodeName = reader.string(); + } + } + this.learningRateMultiplier = 0; + if (version >= 10) { + this.learningRateMultiplier = reader.float32(); + } + }; + op.LearnableParameter = function(reader, version) { + if (version >= 3) { + this.learningRateMultiplier = reader.float32(); + this.sampleLayout = new cntk_v1.TensorShape(reader); + } + else { + throw new cntk.Error('LeanableParameter reader implemented.'); + } + this.value = new cntk_v1.Matrix(reader); + }; + op.CrossEntropyWithSoftmax = function(reader) { + this.evalMode = reader.uint32(); + if (this.evalMode > 2) { + this.evalMode = 0; + reader.skip(-4); + } + }; + op.Times = function(reader, version) { + this.outputRank = (version >= 3) ? reader.uint64() : 1; + this.inferInputRankToMap = (version >= 12) ? reader.int32() : -1; + }; + op.Dropout = function(reader, version) { + if (version >= 16) { + this.rngSeed = (version == 16) ? 
reader.uint32() : reader.uint64(); + this.rngOffset = reader.uint64(); + } + }; + op.ConvolutionBase = function(reader, version) { + if (version >= 5) { + this.kernelShape = new cntk_v1.TensorShape(reader); + this.mapCount = new cntk_v1.TensorShape(reader); + this.strides = new cntk_v1.TensorShape(reader); + this.sharing = reader.booleans(reader.uint64()); + this.autoPadding = reader.booleans(reader.uint64()); + this.lowerPad = new cntk_v1.TensorShape(reader); + this.upperPad = new cntk_v1.TensorShape(reader); + this.poolKind = reader.enum(); + this.imageLayoutKind = reader.enum(); + this.maxTempMemSizeInSamples = reader.uint64(); + } + if (version >= 9) { + this.transpose = reader.boolean(); + } + if (version >= 20) { + this.outputShape = new cntk_v1.TensorShape(reader); + } + if (version >= 21) { + this.ceilOutDim = reader.boolean(); + } + if (version >= 23) { + this.includePad = reader.boolean(); + } + }; + op.Convolution = function(reader, version) { + op.ConvolutionBase.apply(this, [ reader, version ]); + if (version < 5) { + this.kernelShape = new cntk_v1.TensorShape([ reader.uint64(), reader.uint64(), 1 ]); + this.strides = new cntk_v1.TensorShape([ reader.uint64(), reader.uint64(), 1 ]); + this.mapCount = new cntk_v1.TensorShape([ reader.uint32() ]); + this.imageLayoutKind = reader.enum(); + this.autoPadding = [ reader.boolean() ]; + this.maxTempMemSizeInSamples = reader.uint64(); + this.poolKind = 'None'; + this.convolution2D = true; + this.sharing = [ true ]; + this.lowerPad = new cntk_v1.TensorShape([ 0 ]); + this.upperPad = new cntk_v1.TensorShape([ 0 ]); + } + else { + this.convolution2D = reader.boolean(); + if (version >= 18) { + this.dilation = new cntk_v1.TensorShape(reader); + } + else { + this.dilation = new cntk_v1.TensorShape([ 1 ]); + } + } + }; + op.Pooling = function(reader, version) { + op.ConvolutionBase.apply(this, [ reader, version ]); + }; + op.PoolingBase = function(reader) { + this.imageLayoutKind = reader.enum(); + this.windowWidth = 
reader.uint32(); + this.windowHeight = reader.uint64(); + this.horizontalSubsample = reader.uint64(); + this.verticalSubsample = reader.uint64(); + }; + op.MaxPooling = function(reader, version) { + op.PoolingBase.apply(this, [ reader, version ]); + }; + op.ROIPooling = function(reader, version) { + this.roiOutputShape = new cntk_v1.TensorShape(reader); + this.poolKind = (version < 26) ? 'Max' : reader.enum(); + this.spatialScale = (version < 26) ? 0.0625 : reader.float64(); + }; + op.Reshape = function(reader) { + this.beginDimParameter = reader.uint32(); + this.endDimParameter = reader.uint32(); + this.replacementSampleLayout = new cntk_v1.TensorShape(reader); + }; + op.ReduceElements = function(reader, version) { + let num_axes = 1; + if (version >= 27) { + num_axes = reader.uint32(); + } + this.axes = []; + for (let i = 0; i < num_axes; i++) { + this.axes.push(reader.uint32()); + } + this.operation = reader.string(); + if (version >= 24) { + this.keepDimensions = reader.boolean(); + } + }; + op.BatchNormalization = function(reader, version) { + let mbCount = 0; + if (version >= 6) { + this.spatial = reader.boolean(); + this.normalizationTimeConstant = reader.float64(); + this.blendTimeConstant = reader.float64(); + this.imageLayoutKind = reader.enum(); + if (version >= 13) { + if (version != 19) { + this.runCountUntied = reader.uint64(); + } + else { + this.runCountUntied = reader.boolean() ? 
0 : 'SIZE_MAX'; // TODO + } + } + else { + mbCount = reader.uint64(); + } + this.epsilon = reader.float64(); + this.useCntkEngine = reader.boolean(); + } + else { + let verWritten = reader.int32(); + let verReadable = reader.int32(); + if (verReadable > verWritten || verWritten < 0x00010001 || verReadable > 0x00010004) { + throw new cntk.Error('BatchNormalization version not supported.'); + } + this.eval = reader.boolean(); + this.spatial = reader.boolean(); + if (verWritten >= 0x00010004) { + this.normalizationTimeConstant = reader.float64(); + } + else { + reader.float64(); // expAvgFactor + } + if (verWritten >= 0x00010002) { + this.imageLayoutKind = reader.enum(); + mbCount = reader.uint64(); + } + if (verWritten >= 0x00010003) { + this.epsilon = reader.float64(); + this.useCntkEngine = reader.boolean(); + } + } + if (version < 13) { + this.runCountUntied = 16 * mbCount; + this.convertRunningVariancePending = true; + } + }; + op.Tanh = function() {}; + op.Sigmoid = function() {}; + op.Logistic = function() {}; + op.SquareError = function() {}; + op.ErrorPrediction = function() {}; + op.RowStack = function(reader, version) { + this.spliceDim = (version >= 3) ? 
reader.int32() : 1; + }; + op.Slice = function(reader, version) { + let num = 1; + if (version >= 22) { + num = reader.int32(); + } + this.index = []; + this.axis = []; + this.strideMultiplier = []; + for (let i = 0; i < num; i++) { + this.index.push([ [ reader.uint64(), reader.uint64() ] ]); + if (version >= 3) { + this.axis.push(reader.int32()); + } + if (version >= 27) { + this.strideMultiplier.push(reader.int32()); + } + } + }; + op.PastValue = function(reader, version) { + this.timeStep = reader.int32(); + if (version > 3) { + this.sampleLayout = new cntk_v1.TensorShape(reader, false); + } + else { + let rows = reader.uint64(); + reader.uint64(); + this.sampleLayout = new cntk_v1.TensorShape([ rows ], true); + } + if (version >= 2) { + this.initialStateValue = reader.int32(); + } + }; + op.FutureValue = function(reader, version) { + this.timeStep = reader.int32(); + if (version > 3) { + this.sampleLayout = new cntk_v1.TensorShape(reader, false); + } + else { + let rows = reader.uint64(); + reader.uint64(); + this.sampleLayout = new cntk_v1.TensorShape([ rows ], true); + } + if (version >= 2) { + this.initialStateValue = reader.int32(); + } + }; + op.TransposeDimensions = function(reader, version) { + if (version >= 3) { + this.axis1 = reader.int32(); + this.axis2 = reader.int32(); + if (version >= 25 && this.axis1 == 0 && this.axis2 == 0) { + let size = reader.uint64(); + this.perm = []; + for (let i = 0; i < size; i++) { + this.perm.push(reader.uint64()); + } + } + } + else { + this.axis1 = 1; + this.axis2 = 2; + } + }; + op.AveragePooling = function(reader, version) { + op.PoolingBase.apply(this, [ reader, version ]); + }; + op.InvStdDev = function(reader) { + this.hasComputed = reader.boolean(); + this.value = new cntk_v1.Matrix(reader); + }; + op.Mean = function(reader) { + this.hasComputed = reader.boolean(); + this.value = new cntk_v1.Matrix(reader); + }; + op.PerDimMeanVarNormalization = function() {}; + op.Softmax = function() {}; + op.DynamicAxis = 
function() {}; + + let nodes = []; + this.nodes = {}; + for (let i = 0; i < numNodes; i++) { + const precision = this.version >= 7 ? reader.string() : ''; + if (precision != 'float' && precision != 'double' && precision != 'half' && precision != '') { + throw new cntk.Error("Invalid precision format '" + precision + "'."); + } + let obj = { __type__: reader.string() }; + obj.name = reader.string(); + obj.precision = precision; + const constructor = op[obj.__type__]; + if (!constructor) { + throw new cntk.Error("Unknown node type '" + obj.__type__ + "'."); + } + constructor.apply(obj, [ reader, this.version ]); + nodes.push(obj); + this.nodes[obj.name] = obj; + } + reader.assert('ENodeList'); + reader.assert('BRelation'); + for (let j = 0; j < numNodes; j++) { + const nodeName = reader.string(); + const node = this.nodes[nodeName]; + const numChildren = reader.uint64(); + let children = []; + for (let k = 0; k < numChildren; k++) { + children.push(reader.string()); + } + if (this.version < 19 && node.__type__ == 'BatchNormalization') { + const runSampleCount = { + __type__: 'LearnableParameter', + name: nodeName + '.run_sample_count', + precision: node.precision, + sampleLayout: new cntk_v1.TensorShape([ 1 ]), // TODO set value = 0 + learningRateMultiplier: 0 + }; + nodes.push(runSampleCount); + this.nodes[runSampleCount.name] = runSampleCount; + children.push(runSampleCount.name); + } + if (node.__type__ == 'Convolution' && children.length > 1) { + children.splice(0, 0, children.pop()); + } + node.inputs = children; + } + reader.assert('ERelation'); + reader.assert('BRootNodes'); + if (reader.match('BFeatureNodes')) { + this.feature = reader.strings(reader.uint64()); + reader.assert('EFeatureNodes'); + } + if (reader.match('BLabelNodes')) { + this.label = reader.strings(reader.uint64()); + reader.assert('ELabelNodes'); + } + if (reader.match('BCriterionNodes')) { + this.criterion = reader.strings(reader.uint64()); + reader.assert('ECriterionNodes'); + } + if 
(this.criterion.length == 0) { + if (reader.match('BCriteriaNodes')) { + this.criterion = reader.strings(reader.uint64()); + reader.assert('ECriteriaNodes'); + } + } + if (reader.match('BNodesReqMultiSeqHandling')) { + reader.strings(reader.uint64()); + reader.assert('ENodesReqMultiSeqHandling'); + } + if (reader.match('BEvalNodes')) { + this.eval = reader.strings(reader.uint64()); + reader.assert('EEvalNodes'); + } + if (reader.match('BOutputNodes')) { + this.output = reader.strings(reader.uint64()); + reader.assert('EOutputNodes'); + } + if (reader.match('BPairNodes')) { + this.pair = reader.strings(reader.uint64()); + reader.assert('EPairNodes'); + } + reader.assert('ERootNodes'); + reader.assert('ECN'); + } +}; + +cntk_v1.Reader = class { + + constructor(buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + } + + match(text) { + let position = this._position; + for (let i = 0; i < text.length; i++) { + if (this.uint16() != text.charCodeAt(i)) { + this._position = position; + return false; + } + } + if (this.uint16() != 0) { + this._position = position; + return false; + } + return true; + } + + assert(text) { + if (!this.match(text)) { + throw new cntk_v1.Error("Invalid '" + text + "' signature."); + } + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new cntk.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.'); + } + } + + boolean() { + return this.byte() != 0 ? 
true : false; + } + + booleans(count) { + let array = []; + for (let i = 0; i < count; i++) { + array.push(this.boolean()); + } + return array; + } + + byte() { + const position = this._position; + this.skip(1); + return this._dataView.getUint8(position); + } + + bytes(length) { + const position = this._position; + this.skip(length); + return this._buffer.subarray(position, this._position); + } + + uint16() { + const position = this._position; + this.skip(2); + return this._dataView.getUint16(position, true); + } + + int32() { + const position = this._position; + this.skip(4); + return this._dataView.getInt32(position, true); + } + + uint32() { + const position = this._position; + this.skip(4); + return this._dataView.getUint32(position, true); + } + + uint64() { + const low = this.uint32(); + const hi = this.uint32(); + if (hi > 65536) { + throw new cntk_v1.Error('Value not in 48-bit range.'); + } + return (hi << 32) | low; + } + + float32() { + const position = this._position; + this.skip(4); + return this._dataView.getFloat32(position, true); + } + + float64() { + const position = this._position; + this.skip(8); + return this._dataView.getFloat64(position, true); + } + + string() { + let text = ''; + let c = this.uint16(); + while (c != 0) { + text += String.fromCharCode(c); + c = this.uint16(); + } + return text; + } + + strings(count) { + let array = []; + for (let i = 0; i < count; i++) { + array.push(this.string()); + } + return array; + } + + enum() { + return this.int32(); + } +}; + +cntk_v1.TensorShape = class { + + constructor(reader, acceptLegacyFormat = false) { + if (reader && Array.isArray(reader)) { + this.dims = reader; + return; + } + this.dims = []; + let rank = reader.uint32(); + let dim0 = 0; + if (rank > 0) { + dim0 = reader.uint32(); + } + if (!acceptLegacyFormat || dim0 != 0) { + if (rank > 0) { + this.dims.push(dim0); + } + for (let i = 1; i < rank; i++) { + this.dims.push(reader.uint32()); + } + } + else { + let dim = reader.uint32(); + 
this.dims.push(reader.uint32()); + this.dims.push(rank); + this.dims.push(dim); + } + } +}; + +cntk_v1.Matrix = class { + + constructor(reader) { + let type = reader.byte(); + switch (type) { + case 100: { + // dense + reader.assert('BMAT'); + const elsize = reader.uint64(); + this.name = reader.string(); + this.format = reader.uint32(); + this.rows = reader.uint64(); + this.columns = reader.uint64(); + reader.bytes(elsize * this.rows * this.columns); + reader.assert('EMAT'); + break; + } + case 115: // sparse + throw new cntk_v1.Error('Matrix sparse type not implemented.'); + default: + throw new cntk_v1.Error("Matrix type '" + type.toString() + "' not implemented."); + } + } +}; + +cntk_v1.ImageLayoutKind = { + 0: 'CHW', + 1: 'HWC' +}; + +cntk_v1.PoolKind = { + 0: 'None', + 1: 'Max', + 2: 'Average' +}; + +cntk_v1.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading CNTK v1 model.'; + } +}; + +cntk.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading CNTK model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = cntk.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/coreml-metadata.json b/frontend/packages/core/public/netron/coreml-metadata.json new file mode 100644 index 00000000..4f996648 --- /dev/null +++ b/frontend/packages/core/public/netron/coreml-metadata.json @@ -0,0 +1,513 @@ +[ + { + "name": "convolution", + "schema": { + "category": "Layer", + "description": "A layer that performs spatial convolution or deconvolution.", + "attributes": [ + { "name": "outputShape", "type": "uint64[]", "description": "Either None or a 2-tuple, specifying the output shape (output_height, output_width). Used only when is_deconv == True. When is_deconv == False, this parameter is ignored. 
If it is None, the output shape is calculated automatically using the border_mode. Kindly refer to NeuralNetwork.proto for details.", "visible": false }, + { "name": "outputChannels", "type": "uint64", "description": "The number of kernels. Same as ``C_out`` used in the layer description.", "visible": false }, + { "name": "kernelChannels", "type": "uint64", "description": "Channel dimension of the kernels. Must be equal to ``inputChannels / nGroups``, if isDeconvolution == False. Must be equal to ``inputChannels``, if isDeconvolution == True.", "visible": false }, + { "name": "nGroups", "type": "uint64", "description": "Group convolution, i.e. weight reuse along channel axis. Input and kernels are divided into g groups and convolution / deconvolution is applied within the groups independently. If not set or 0, it is set to the default value 1.", "default": 1 }, + { "name": "isDeconvolution", "type": "boolean", "description": "Flag to specify whether it is a deconvolution layer." }, + { "name": "valid", "type": "ValidPadding", "visible": false }, + { "name": "same", "type": "SamePadding", "visible": false }, + { "name": "dilationFactor", "type": "uint64[]", "default": [ 1, 1 ] }, + { "name": "stride", "type": "uint64[]", "default": [ 1, 1 ] }, + { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] }, + { "name": "hasBias", "type": "boolean", "description": "Flag to specify whether a bias is to be added or not.", "visible": false } + ] + } + }, + { + "name": "innerProduct", + "schema": { + "category": "Layer", + "description": "A layer that performs a matrix vector product. 
This is equivalent to a fully-connected, or dense layer.", + "attributes": [ + { "name": "inputChannels", "type": "uint64", "visible": false }, + { "name": "outputChannels", "type": "uint64", "visible": false }, + { "name": "hasBias", "type": "boolean", "visible": false } + ] + } + }, + { + "name": "gru", + "schema": { + "category": "Layer", + "description": "Gated-Recurrent Unit (GRU) Layer", + "inputs": [ + { "name": "input" }, + { "name": "h" }, + { "name": "updateGateWeightMatrix", "visible": false }, + { "name": "resetGateWeightMatrix", "visible": false }, + { "name": "outputGateWeightMatrix", "visible": false }, + { "name": "updateGateRecursionMatrix", "visible": false }, + { "name": "resetGateRecursionMatrix", "visible": false }, + { "name": "outputGateRecursionMatrix", "visible": false }, + { "name": "updateGateBiasVector", "visible": false }, + { "name": "resetGateBiasVector", "visible": false }, + { "name": "outputGateBiasVector", "visible": false } + ], + "outputs": [ + { "name": "output" }, + { "name": "h" } + ] + } + }, + { + "name": "uniDirectionalLSTM", + "schema": { + "category": "Layer", + "description": "A unidirectional long short-term memory (LSTM) layer.", + "inputs": [ + { "name": "input" }, + { "name": "h" }, + { "name": "c" }, + { "name": "inputGateWeightMatrix", "visible": false }, + { "name": "forgetGateWeightMatrix", "visible": false }, + { "name": "blockInputWeightMatrix", "visible": false }, + { "name": "outputGateWeightMatrix", "visible": false }, + { "name": "inputGateRecursionMatrix", "visible": false }, + { "name": "forgetGateRecursionMatrix", "visible": false }, + { "name": "blockInputRecursionMatrix", "visible": false }, + { "name": "outputGateRecursionMatrix", "visible": false }, + { "name": "inputGateBiasVector", "visible": false }, + { "name": "forgetGateBiasVector", "visible": false }, + { "name": "blockInputBiasVector", "visible": false }, + { "name": "outputGateBiasVector", "visible": false } + ], + "outputs": [ + { "name": 
"output" }, + { "name": "h" }, + { "name": "c" } + ] + } + }, + { + "name": "biDirectionalLSTM", + "schema": { + "category": "Layer", + "description": "Bidirectional long short-term memory (LSTM) layer. The first LSTM operates on the input sequence in the forward direction. The second LSTM operates on the input sequence in the reverse direction.", + "inputs": [ + { "name": "input" }, + { "name": "h" }, + { "name": "c" }, + { "name": "h_rev" }, + { "name": "c_rev" }, + { "name": "inputGateWeightMatrix", "visible": false }, + { "name": "forgetGateWeightMatrix", "visible": false }, + { "name": "blockInputWeightMatrix", "visible": false }, + { "name": "outputGateWeightMatrix", "visible": false }, + { "name": "inputGateRecursionMatrix", "visible": false }, + { "name": "forgetGateRecursionMatrix", "visible": false }, + { "name": "blockInputRecursionMatrix", "visible": false }, + { "name": "outputGateRecursionMatrix", "visible": false }, + { "name": "inputGateBiasVector", "visible": false }, + { "name": "forgetGateBiasVector", "visible": false }, + { "name": "blockInputBiasVector", "visible": false }, + { "name": "outputGateBiasVector", "visible": false }, + { "name": "inputGateWeightMatrix_rev", "visible": false }, + { "name": "forgetGateWeightMatrix_rev", "visible": false }, + { "name": "blockInputWeightMatrix_rev", "visible": false }, + { "name": "outputGateWeightMatrix_rev", "visible": false }, + { "name": "inputGateRecursionMatrix_rev", "visible": false }, + { "name": "forgetGateRecursionMatrix_rev", "visible": false }, + { "name": "blockInputRecursionMatrix_rev", "visible": false }, + { "name": "outputGateRecursionMatrix_rev", "visible": false }, + { "name": "inputGateBiasVector_rev", "visible": false }, + { "name": "forgetGateBiasVector_rev", "visible": false }, + { "name": "blockInputBiasVector_rev", "visible": false }, + { "name": "outputGateBiasVector_rev", "visible": false } + ], + "outputs": [ + { "name": "output" }, + { "name": "h" }, + { "name": "c" }, + { 
"name": "h_rev" }, + { "name": "c_rev" } + ] + } + }, + { + "name": "bias", + "schema": { + "category": "Layer", + "description": "A layer that performs elementwise addition of a bias, which is broadcasted to match the input shape." + } + }, + { + "name": "activation", + "schema": { + "category": "Activation", + "description": "Applies specified type of activation function to input." + } + }, + { + "name": "softmax", + "schema": { + "category": "Activation", + "description": "A layer that performs softmax normalization. Normalization is done along the channel axis." + } + }, + { + "name": "batchnorm", + "schema": { + "category": "Normalization", + "description": "A layer that performs batch normalization, which is performed along the channel axis, and repeated along the other axes, if present.", + "attributes": [ + { "name": "epsilon", "default": 0.000009999999747378752 }, + { "name": "computeMeanVar", "visible": false }, + { "name": "instanceNormalization", "visible": false } + ] + } + }, + { + "name": "l2normalize", + "schema": { + "category": "Normalization", + "description": "A layer that performs L2 normalization, i.e. divides by the the square root of the sum of squares of all elements of input." 
+ } + }, + { + "name": "lrn", + "schema": { + "category": "Normalization", + "description": "A layer that performs local response normalization (LRN).", + "attributes": [ + { "name": "k", "default": 1 } + ] + } + }, + { + "name": "pooling", + "schema": { + "category": "Pool", + "description": "Spatial Pooling layer to reduce dimensions of input using the specified kernel size and type.", + "attributes": [ + { "name": "includeLastPixel", "type": "ValidCompletePadding", "visible": false }, + { "name": "same", "type": "SamePadding", "visible": false }, + { "name": "valid", "type": "ValidCompletePadding", "visible": false }, + { "name": "type", "type": "PoolingLayerParams.PoolingType" }, + { "name": "globalPooling", "type": "boolean", "default": false }, + { "name": "stride", "type": "uint64", "default": [ 1, 1 ] }, + { "name": "kernelSize", "type": "uint64[]", "default": [ 3, 3 ] }, + { "name": "avgPoolExcludePadding", "type": "boolean", "default": false } + ] + } + }, + { + "name": "permute", + "schema": { + "category": "Shape", + "description": "A layer that rearranges the dimensions and data of an input." + } + }, + { + "name": "reduce", + "schema": { + "description": "A layer that reduces the input using a specified operation." + } + }, + { + "name": "gelu", + "schema": { + "category": "Activation", + "description": "Gaussian error linear unit activation.", + "attributes": [ + { "name": "mode", "type": "GeluLayerParams.GeluMode" } + ] + } + }, + { + "name": "softmaxND", + "schema": { + "category": "Activation", + "description": "A layer that performs softmax normalization along a specified axis." + } + }, + { + "name": "flatten", + "schema": { + "category": "Shape", + "description": "A layer that flattens the input.", + "attributes": [ + { "name": "mode", "type": "FlattenLayerParams.FlattenOrder" } + ] + } + }, + { + "name": "reshape", + "schema": { + "category": "Shape", + "description": "A layer that recasts the input into a new shape." 
+ } + }, + { + "name": "reorganizeData", + "schema": { + "category": "Shape", + "description": "A layer that reorganizes data in the input in: 1. SPACE_TO_DEPTH, 2. DEPTH_TO_SPACE." + } + }, + { + "name": "padding", + "schema": { + "category": "Shape", + "description": "Fill a constant value in the padded region.", + "attributes": [ + { "name": "paddingAmounts", "visible": false } + ] + } + }, + { + "name": "crop", + "schema": { + "category": "Data", + "description": "A layer that crops the spatial dimensions of an input. If two inputs are provided, the shape of the second input is used as the reference shape.", + "inputs": [ + { "name": "x1" }, + { "name": "x2" } + ], + "outputs": [ + { "name": "y" } + ] + } + }, + { + "name": "sequenceRepeat", + "schema": { + "category": "Shape", + "description": "A layer that repeats a sequence." + } + }, + { + "name": "concat", + "schema": { + "category": "Tensor", + "description": "A layer that concatenates along the channel axis (default) or sequence axis.", + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + } + }, + { + "name": "add", + "schema": { + "description": "A layer that performs elementwise addition.", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + } + }, + { + "name": "multiply", + "schema": { + "description": "A layer that performs elementwise multiplication.", + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + } + }, + { + "name": "max", + "schema": { + "description": "A layer that computes the elementwise maximum over the inputs." + } + }, + { + "name": "min", + "schema": { + "description": "A layer that computes the elementwise minimum over the inputs." + } + }, + { + "name": "average", + "schema": { + "description": "A layer that computes the elementwise average of the inputs." 
+ } + }, + { + "name": "unary", + "schema": { + "description": "A layer that applies a unary function.", + "attributes": [ + { "name": "type", "type": "UnaryFunctionLayerParams.Operation" }, + { "name": "alpha", "default": 1.0 }, + { "name": "scale", "default": 1.0 }, + { "name": "epsilon", "default": 9.999999974752427e-7 } + ], + "inputs": [ + { "name": "x" } + ], + "outputs": [ + { "name": "z" } + ] + } + }, + { + "name": "mvn", + "schema": { + "description": "Fill a constant value in the padded region." + } + }, + { + "name": "dot", + "schema": { + "description": "If true, inputs are normalized first, thereby computing the cosine similarity." + } + }, + { + "name": "scale", + "schema": { + "category": "Layer", + "description": "A layer that performs elmentwise multiplication by a scale factor and optionally adds a bias.", + "attributes": [ + { "name": "hasBias", "type": "boolean", "visible": false } + ] + } + }, + { + "name": "upsample", + "schema": { + "category": "Data", + "description": "A layer that scales up spatial dimensions. It supports two modes: nearest neighbour (default) and bilinear." + } + }, + { + "name": "slice", + "schema": { + "description": "A layer that slices the input data along a given axis." + } + }, + { + "name": "slice", + "schema": { + "description": "A layer that uniformly splits across the channel dimension to produce a specified number of outputs." + } + }, + { + "name": "embedding", + "schema": { + "category": "Transform", + "description": "A layer that performs a matrix lookup and optionally adds a bias." 
+ } + }, + { + "name": "featureVectorizer", + "schema": { + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + } + }, + { + "name": "loadConstant", + "schema": { + "category": "Data" + } + }, + { + "name": "stringClassLabels", + "schema": { + "category": "Data", + "outputs": [ + { "name": "probabilities" }, + { "name": "feature" } + ] + } + }, + { + "name": "int64ClassLabels", + "schema": { + "category": "Data", + "outputs": [ + { "name": "probabilities" }, + { "name": "feature" } + ] + } + }, + { + "name": "scaler", + "schema": { + "category": "Data" + } + }, + { + "name": "wordTagger", + "schema": { + "attributes": [ + { "name": "revision", "visible": false } + ], + "outputs": [ + { "name": "tokens" }, + { "name": "tags" }, + { "name": "locations" }, + { "name": "lengths" } + ] + } + }, + { + "name": "textClassifier", + "schema": { + "attributes": [ + { "name": "revision", "visible": false } + ] + } + }, + { + "name": "nonMaximumSuppression", + "schema": { + "attributes": [ + { "name": "iouThreshold" }, + { "name": "confidenceThreshold" } + ], + "inputs": [ + { "name": "confidence" }, + { "name": "coordinates" }, + { "name": "iouThreshold" }, + { "name": "confidenceThreshold" } + ], + "outputs": [ + { "name": "confidence" }, + { "name": "coordinates" } + ] + } + }, + { + "name": "squeeze", + "schema": { + "category": "Transform" + } + }, + { + "name": "mvn", + "schema": { + "category": "Normalization", + "description": "A layer that performs mean variance normalization, along axis = -3." 
+ } + }, + { + "name": "itemSimilarityRecommender", + "schema": { + "inputs": [ + { "name": "item" }, + { "name": "numRecommendations" }, + { "name": "itemRestriction" }, + { "name": "itemExclusion" } + ], + "outputs": [ + { "name": "recommendedItemList" }, + { "name": "recommendedItemScore" } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/coreml-proto.js b/frontend/packages/core/public/netron/coreml-proto.js new file mode 100644 index 00000000..2bf13735 --- /dev/null +++ b/frontend/packages/core/public/netron/coreml-proto.js @@ -0,0 +1,12924 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.coreml || ($protobuf.roots.coreml = {}); + + $root.CoreML = (function() { + + var CoreML = {}; + + CoreML.Specification = (function() { + + var Specification = {}; + + Specification.Pipeline = (function() { + + function Pipeline(properties) { + this.models = []; + this.names = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Pipeline.prototype.models = $util.emptyArray; + Pipeline.prototype.names = $util.emptyArray; + + Pipeline.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Pipeline(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.models && message.models.length)) + message.models = []; + message.models.push($root.CoreML.Specification.Model.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.names && message.names.length)) + message.names = []; + message.names.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Pipeline; + })(); + + Specification.PipelineClassifier = (function() { + + function PipelineClassifier(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PipelineClassifier.prototype.pipeline = null; + + PipelineClassifier.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.PipelineClassifier(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pipeline = $root.CoreML.Specification.Pipeline.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PipelineClassifier; + })(); + + Specification.PipelineRegressor = (function() { + + function PipelineRegressor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PipelineRegressor.prototype.pipeline = null; + + PipelineRegressor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.PipelineRegressor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pipeline = $root.CoreML.Specification.Pipeline.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PipelineRegressor; + })(); + + Specification.FeatureDescription = (function() { + + function FeatureDescription(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FeatureDescription.prototype.name = ""; + FeatureDescription.prototype.shortDescription = ""; + FeatureDescription.prototype.type = null; + + FeatureDescription.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FeatureDescription(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shortDescription = reader.string(); + break; + case 3: + message.type = $root.CoreML.Specification.FeatureType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FeatureDescription; + })(); + + Specification.Metadata = (function() { + + function Metadata(properties) { + this.userDefined = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Metadata.prototype.shortDescription = ""; + Metadata.prototype.versionString = ""; + Metadata.prototype.author = ""; + Metadata.prototype.license = ""; + Metadata.prototype.userDefined = $util.emptyObject; + + 
Metadata.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.Metadata(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.shortDescription = reader.string(); + break; + case 2: + message.versionString = reader.string(); + break; + case 3: + message.author = reader.string(); + break; + case 4: + message.license = reader.string(); + break; + case 100: + reader.skip().pos++; + if (message.userDefined === $util.emptyObject) + message.userDefined = {}; + key = reader.string(); + reader.pos++; + message.userDefined[key] = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Metadata; + })(); + + Specification.ModelDescription = (function() { + + function ModelDescription(properties) { + this.input = []; + this.output = []; + this.trainingInput = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ModelDescription.prototype.input = $util.emptyArray; + ModelDescription.prototype.output = $util.emptyArray; + ModelDescription.prototype.predictedFeatureName = ""; + ModelDescription.prototype.predictedProbabilitiesName = ""; + ModelDescription.prototype.trainingInput = $util.emptyArray; + ModelDescription.prototype.metadata = null; + + ModelDescription.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ModelDescription(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push($root.CoreML.Specification.FeatureDescription.decode(reader, reader.uint32())); + break; + case 10: + if (!(message.output && message.output.length)) + message.output = []; + message.output.push($root.CoreML.Specification.FeatureDescription.decode(reader, reader.uint32())); + break; + case 11: + message.predictedFeatureName = reader.string(); + break; + case 12: + message.predictedProbabilitiesName = reader.string(); + break; + case 50: + if (!(message.trainingInput && message.trainingInput.length)) + message.trainingInput = []; + message.trainingInput.push($root.CoreML.Specification.FeatureDescription.decode(reader, reader.uint32())); + break; + case 100: + message.metadata = $root.CoreML.Specification.Metadata.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ModelDescription; + })(); + + Specification.Model = (function() { + + function Model(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Model.prototype.specificationVersion = 0; + Model.prototype.description = null; + Model.prototype.isUpdatable = false; + Model.prototype.pipelineClassifier = null; + Model.prototype.pipelineRegressor = null; + Model.prototype.pipeline = null; + Model.prototype.glmRegressor = null; + Model.prototype.supportVectorRegressor = null; + Model.prototype.treeEnsembleRegressor = null; + Model.prototype.neuralNetworkRegressor = null; + Model.prototype.bayesianProbitRegressor = null; + Model.prototype.glmClassifier = null; + Model.prototype.supportVectorClassifier = null; + 
Model.prototype.treeEnsembleClassifier = null; + Model.prototype.neuralNetworkClassifier = null; + Model.prototype.kNearestNeighborsClassifier = null; + Model.prototype.neuralNetwork = null; + Model.prototype.itemSimilarityRecommender = null; + Model.prototype.customModel = null; + Model.prototype.linkedModel = null; + Model.prototype.oneHotEncoder = null; + Model.prototype.imputer = null; + Model.prototype.featureVectorizer = null; + Model.prototype.dictVectorizer = null; + Model.prototype.scaler = null; + Model.prototype.categoricalMapping = null; + Model.prototype.normalizer = null; + Model.prototype.arrayFeatureExtractor = null; + Model.prototype.nonMaximumSuppression = null; + Model.prototype.identity = null; + Model.prototype.textClassifier = null; + Model.prototype.wordTagger = null; + Model.prototype.visionFeaturePrint = null; + Model.prototype.soundAnalysisPreprocessing = null; + Model.prototype.gazetteer = null; + Model.prototype.wordEmbedding = null; + + var $oneOfFields; + + Object.defineProperty(Model.prototype, "Type", { + get: $util.oneOfGetter($oneOfFields = ["pipelineClassifier", "pipelineRegressor", "pipeline", "glmRegressor", "supportVectorRegressor", "treeEnsembleRegressor", "neuralNetworkRegressor", "bayesianProbitRegressor", "glmClassifier", "supportVectorClassifier", "treeEnsembleClassifier", "neuralNetworkClassifier", "kNearestNeighborsClassifier", "neuralNetwork", "itemSimilarityRecommender", "customModel", "linkedModel", "oneHotEncoder", "imputer", "featureVectorizer", "dictVectorizer", "scaler", "categoricalMapping", "normalizer", "arrayFeatureExtractor", "nonMaximumSuppression", "identity", "textClassifier", "wordTagger", "visionFeaturePrint", "soundAnalysisPreprocessing", "gazetteer", "wordEmbedding"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Model.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Model(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.specificationVersion = reader.int32(); + break; + case 2: + message.description = $root.CoreML.Specification.ModelDescription.decode(reader, reader.uint32()); + break; + case 10: + message.isUpdatable = reader.bool(); + break; + case 200: + message.pipelineClassifier = $root.CoreML.Specification.PipelineClassifier.decode(reader, reader.uint32()); + break; + case 201: + message.pipelineRegressor = $root.CoreML.Specification.PipelineRegressor.decode(reader, reader.uint32()); + break; + case 202: + message.pipeline = $root.CoreML.Specification.Pipeline.decode(reader, reader.uint32()); + break; + case 300: + message.glmRegressor = $root.CoreML.Specification.GLMRegressor.decode(reader, reader.uint32()); + break; + case 301: + message.supportVectorRegressor = $root.CoreML.Specification.SupportVectorRegressor.decode(reader, reader.uint32()); + break; + case 302: + message.treeEnsembleRegressor = $root.CoreML.Specification.TreeEnsembleRegressor.decode(reader, reader.uint32()); + break; + case 303: + message.neuralNetworkRegressor = $root.CoreML.Specification.NeuralNetworkRegressor.decode(reader, reader.uint32()); + break; + case 304: + message.bayesianProbitRegressor = $root.CoreML.Specification.BayesianProbitRegressor.decode(reader, reader.uint32()); + break; + case 400: + message.glmClassifier = $root.CoreML.Specification.GLMClassifier.decode(reader, reader.uint32()); + break; + case 401: + message.supportVectorClassifier = $root.CoreML.Specification.SupportVectorClassifier.decode(reader, reader.uint32()); + break; + case 402: + message.treeEnsembleClassifier = $root.CoreML.Specification.TreeEnsembleClassifier.decode(reader, reader.uint32()); + break; + case 403: + message.neuralNetworkClassifier = $root.CoreML.Specification.NeuralNetworkClassifier.decode(reader, reader.uint32()); + break; 
+ case 404: + message.kNearestNeighborsClassifier = $root.CoreML.Specification.KNearestNeighborsClassifier.decode(reader, reader.uint32()); + break; + case 500: + message.neuralNetwork = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + case 501: + message.itemSimilarityRecommender = $root.CoreML.Specification.ItemSimilarityRecommender.decode(reader, reader.uint32()); + break; + case 555: + message.customModel = $root.CoreML.Specification.CustomModel.decode(reader, reader.uint32()); + break; + case 556: + message.linkedModel = $root.CoreML.Specification.LinkedModel.decode(reader, reader.uint32()); + break; + case 600: + message.oneHotEncoder = $root.CoreML.Specification.OneHotEncoder.decode(reader, reader.uint32()); + break; + case 601: + message.imputer = $root.CoreML.Specification.Imputer.decode(reader, reader.uint32()); + break; + case 602: + message.featureVectorizer = $root.CoreML.Specification.FeatureVectorizer.decode(reader, reader.uint32()); + break; + case 603: + message.dictVectorizer = $root.CoreML.Specification.DictVectorizer.decode(reader, reader.uint32()); + break; + case 604: + message.scaler = $root.CoreML.Specification.Scaler.decode(reader, reader.uint32()); + break; + case 606: + message.categoricalMapping = $root.CoreML.Specification.CategoricalMapping.decode(reader, reader.uint32()); + break; + case 607: + message.normalizer = $root.CoreML.Specification.Normalizer.decode(reader, reader.uint32()); + break; + case 609: + message.arrayFeatureExtractor = $root.CoreML.Specification.ArrayFeatureExtractor.decode(reader, reader.uint32()); + break; + case 610: + message.nonMaximumSuppression = $root.CoreML.Specification.NonMaximumSuppression.decode(reader, reader.uint32()); + break; + case 900: + message.identity = $root.CoreML.Specification.Identity.decode(reader, reader.uint32()); + break; + case 2000: + message.textClassifier = $root.CoreML.Specification.CoreMLModels.TextClassifier.decode(reader, reader.uint32()); + 
break; + case 2001: + message.wordTagger = $root.CoreML.Specification.CoreMLModels.WordTagger.decode(reader, reader.uint32()); + break; + case 2002: + message.visionFeaturePrint = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.decode(reader, reader.uint32()); + break; + case 2003: + message.soundAnalysisPreprocessing = $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.decode(reader, reader.uint32()); + break; + case 2004: + message.gazetteer = $root.CoreML.Specification.CoreMLModels.Gazetteer.decode(reader, reader.uint32()); + break; + case 2005: + message.wordEmbedding = $root.CoreML.Specification.CoreMLModels.WordEmbedding.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Model; + })(); + + Specification.CoreMLModels = (function() { + + var CoreMLModels = {}; + + CoreMLModels.VisionFeaturePrint = (function() { + + function VisionFeaturePrint(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + VisionFeaturePrint.prototype.scene = null; + + var $oneOfFields; + + Object.defineProperty(VisionFeaturePrint.prototype, "VisionFeaturePrintType", { + get: $util.oneOfGetter($oneOfFields = ["scene"]), + set: $util.oneOfSetter($oneOfFields) + }); + + VisionFeaturePrint.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 20: + message.scene = $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + VisionFeaturePrint.Scene = (function() { + + function Scene(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Scene.prototype.version = 0; + + Scene.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Scene.SceneVersion = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "SCENE_VERSION_INVALID"] = 0; + values[valuesById[1] = "SCENE_VERSION_1"] = 1; + return values; + })(); + + return Scene; + })(); + + return VisionFeaturePrint; + })(); + + CoreMLModels.TextClassifier = (function() { + + function TextClassifier(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TextClassifier.prototype.revision = 0; + TextClassifier.prototype.language = ""; + TextClassifier.prototype.modelParameterData = $util.newBuffer([]); + TextClassifier.prototype.stringClassLabels = null; + + var $oneOfFields; + + 
Object.defineProperty(TextClassifier.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + TextClassifier.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.TextClassifier(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + case 200: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TextClassifier; + })(); + + CoreMLModels.WordTagger = (function() { + + function WordTagger(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + WordTagger.prototype.revision = 0; + WordTagger.prototype.language = ""; + WordTagger.prototype.tokensOutputFeatureName = ""; + WordTagger.prototype.tokenTagsOutputFeatureName = ""; + WordTagger.prototype.tokenLocationsOutputFeatureName = ""; + WordTagger.prototype.tokenLengthsOutputFeatureName = ""; + WordTagger.prototype.modelParameterData = $util.newBuffer([]); + WordTagger.prototype.stringTags = null; + + var $oneOfFields; + + Object.defineProperty(WordTagger.prototype, "Tags", { + get: $util.oneOfGetter($oneOfFields = ["stringTags"]), + set: $util.oneOfSetter($oneOfFields) + }); + + WordTagger.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.WordTagger(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 20: + message.tokensOutputFeatureName = reader.string(); + break; + case 21: + message.tokenTagsOutputFeatureName = reader.string(); + break; + case 22: + message.tokenLocationsOutputFeatureName = reader.string(); + break; + case 23: + message.tokenLengthsOutputFeatureName = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + case 200: + message.stringTags = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return WordTagger; + })(); + + CoreMLModels.Gazetteer = (function() { + + function Gazetteer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Gazetteer.prototype.revision = 0; + Gazetteer.prototype.language = ""; + Gazetteer.prototype.modelParameterData = $util.newBuffer([]); + Gazetteer.prototype.stringClassLabels = null; + + var $oneOfFields; + + Object.defineProperty(Gazetteer.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Gazetteer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.Gazetteer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + case 200: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Gazetteer; + })(); + + CoreMLModels.WordEmbedding = (function() { + + function WordEmbedding(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + WordEmbedding.prototype.revision = 0; + WordEmbedding.prototype.language = ""; + WordEmbedding.prototype.modelParameterData = $util.newBuffer([]); + + WordEmbedding.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.WordEmbedding(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.revision = reader.uint32(); + break; + case 10: + message.language = reader.string(); + break; + case 100: + message.modelParameterData = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return WordEmbedding; + })(); + + CoreMLModels.SoundAnalysisPreprocessing = (function() { + + function SoundAnalysisPreprocessing(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SoundAnalysisPreprocessing.prototype.vggish = null; + + var $oneOfFields; + + Object.defineProperty(SoundAnalysisPreprocessing.prototype, "SoundAnalysisPreprocessingType", { + get: $util.oneOfGetter($oneOfFields = ["vggish"]), + set: $util.oneOfSetter($oneOfFields) + }); + + SoundAnalysisPreprocessing.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 20: + message.vggish = $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SoundAnalysisPreprocessing.Vggish = (function() { + + function Vggish(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Vggish.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoreMLModels.SoundAnalysisPreprocessing.Vggish(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Vggish; + })(); + + return SoundAnalysisPreprocessing; + })(); + + return CoreMLModels; + })(); + + Specification.StringToInt64Map = (function() { + + function StringToInt64Map(properties) { + this.map = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StringToInt64Map.prototype.map = $util.emptyObject; + + StringToInt64Map.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.StringToInt64Map(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.skip().pos++; + if (message.map === $util.emptyObject) + message.map = {}; + key = reader.string(); + reader.pos++; + message.map[key] = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return StringToInt64Map; + })(); + + Specification.Int64ToStringMap = (function() { + + function Int64ToStringMap(properties) { + this.map = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64ToStringMap.prototype.map = $util.emptyObject; + + Int64ToStringMap.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.Int64ToStringMap(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.skip().pos++; + if (message.map === $util.emptyObject) + message.map = {}; + key = reader.int64(); + reader.pos++; + message.map[typeof key === "object" ? 
$util.longToHash(key) : key] = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Int64ToStringMap; + })(); + + Specification.StringToDoubleMap = (function() { + + function StringToDoubleMap(properties) { + this.map = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StringToDoubleMap.prototype.map = $util.emptyObject; + + StringToDoubleMap.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.StringToDoubleMap(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.skip().pos++; + if (message.map === $util.emptyObject) + message.map = {}; + key = reader.string(); + reader.pos++; + message.map[key] = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return StringToDoubleMap; + })(); + + Specification.Int64ToDoubleMap = (function() { + + function Int64ToDoubleMap(properties) { + this.map = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64ToDoubleMap.prototype.map = $util.emptyObject; + + Int64ToDoubleMap.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Int64ToDoubleMap(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.skip().pos++; + if (message.map === $util.emptyObject) + message.map = {}; + key = reader.int64(); + reader.pos++; + message.map[typeof key === "object" ? $util.longToHash(key) : key] = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Int64ToDoubleMap; + })(); + + Specification.StringVector = (function() { + + function StringVector(properties) { + this.vector = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StringVector.prototype.vector = $util.emptyArray; + + StringVector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.StringVector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.vector && message.vector.length)) + message.vector = []; + message.vector.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return StringVector; + })(); + + Specification.Int64Vector = (function() { + + function Int64Vector(properties) { + this.vector = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64Vector.prototype.vector = $util.emptyArray; + + Int64Vector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Int64Vector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.vector && message.vector.length)) + message.vector = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.vector.push(reader.int64()); + } else + message.vector.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Int64Vector; + })(); + + Specification.FloatVector = (function() { + + function FloatVector(properties) { + this.vector = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FloatVector.prototype.vector = $util.emptyArray; + + FloatVector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.FloatVector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.vector && message.vector.length)) + message.vector = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.vector.push(reader.float()); + } else + message.vector.push(reader.float()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FloatVector; + })(); + + Specification.DoubleVector = (function() { + + function DoubleVector(properties) { + this.vector = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DoubleVector.prototype.vector = $util.emptyArray; + + DoubleVector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.DoubleVector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.vector && message.vector.length)) + message.vector = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.vector.push(reader.double()); + } else + message.vector.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DoubleVector; + })(); + + Specification.Int64Range = (function() { + + function Int64Range(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64Range.prototype.minValue = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Int64Range.prototype.maxValue = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + + Int64Range.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.Int64Range(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minValue = reader.int64(); + break; + case 2: + message.maxValue = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Int64Range; + })(); + + Specification.Int64Set = (function() { + + function Int64Set(properties) { + this.values = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64Set.prototype.values = $util.emptyArray; + + Int64Set.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Int64Set(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.values && message.values.length)) + message.values = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.values.push(reader.int64()); + } else + message.values.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Int64Set; + })(); + + Specification.DoubleRange = (function() { + + function DoubleRange(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DoubleRange.prototype.minValue = 0; + DoubleRange.prototype.maxValue = 0; + + DoubleRange.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.DoubleRange(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minValue = reader.double(); + break; + case 2: + message.maxValue = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DoubleRange; + })(); + + Specification.Int64FeatureType = (function() { + + function Int64FeatureType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64FeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Int64FeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Int64FeatureType; + })(); + + Specification.DoubleFeatureType = (function() { + + function DoubleFeatureType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DoubleFeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.DoubleFeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DoubleFeatureType; + })(); + + Specification.StringFeatureType = (function() { + + function StringFeatureType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StringFeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.StringFeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return StringFeatureType; + })(); + + Specification.SizeRange = (function() { + + function SizeRange(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SizeRange.prototype.lowerBound = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + SizeRange.prototype.upperBound = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + SizeRange.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SizeRange(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.lowerBound = reader.uint64(); + break; + case 2: + message.upperBound = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SizeRange; + })(); + + Specification.ImageFeatureType = (function() { + + function ImageFeatureType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ImageFeatureType.prototype.width = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ImageFeatureType.prototype.height = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + ImageFeatureType.prototype.enumeratedSizes = null; + ImageFeatureType.prototype.imageSizeRange = null; + ImageFeatureType.prototype.colorSpace = 0; + + var $oneOfFields; + + Object.defineProperty(ImageFeatureType.prototype, "SizeFlexibility", { + get: $util.oneOfGetter($oneOfFields = ["enumeratedSizes", "imageSizeRange"]), + set: $util.oneOfSetter($oneOfFields) + }); + + ImageFeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ImageFeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.width = reader.int64(); + break; + case 2: + message.height = reader.int64(); + break; + case 21: + message.enumeratedSizes = $root.CoreML.Specification.ImageFeatureType.EnumeratedImageSizes.decode(reader, reader.uint32()); + break; + case 31: + message.imageSizeRange = $root.CoreML.Specification.ImageFeatureType.ImageSizeRange.decode(reader, reader.uint32()); + break; + case 3: + message.colorSpace = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ImageFeatureType.ColorSpace = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "INVALID_COLOR_SPACE"] = 0; + values[valuesById[10] = "GRAYSCALE"] = 10; + values[valuesById[20] = "RGB"] = 20; + values[valuesById[30] = "BGR"] = 30; + return values; + })(); + + ImageFeatureType.ImageSize = (function() { + + function ImageSize(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ImageSize.prototype.width = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + ImageSize.prototype.height = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + + ImageSize.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ImageFeatureType.ImageSize(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.width = reader.uint64(); + break; + case 2: + message.height = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ImageSize; + })(); + + ImageFeatureType.EnumeratedImageSizes = (function() { + + function EnumeratedImageSizes(properties) { + this.sizes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EnumeratedImageSizes.prototype.sizes = $util.emptyArray; + + EnumeratedImageSizes.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ImageFeatureType.EnumeratedImageSizes(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.sizes && message.sizes.length)) + message.sizes = []; + message.sizes.push($root.CoreML.Specification.ImageFeatureType.ImageSize.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return EnumeratedImageSizes; + })(); + + ImageFeatureType.ImageSizeRange = (function() { + + function ImageSizeRange(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ImageSizeRange.prototype.widthRange = null; + ImageSizeRange.prototype.heightRange = null; + + ImageSizeRange.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ImageFeatureType.ImageSizeRange(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.widthRange = $root.CoreML.Specification.SizeRange.decode(reader, reader.uint32()); + break; + case 2: + message.heightRange = $root.CoreML.Specification.SizeRange.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ImageSizeRange; + })(); + + return ImageFeatureType; + })(); + + Specification.ArrayFeatureType = (function() { + + function ArrayFeatureType(properties) { + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArrayFeatureType.prototype.shape = $util.emptyArray; + ArrayFeatureType.prototype.dataType = 0; + ArrayFeatureType.prototype.enumeratedShapes = null; + ArrayFeatureType.prototype.shapeRange = null; + + var $oneOfFields; + + Object.defineProperty(ArrayFeatureType.prototype, "ShapeFlexibility", { + get: $util.oneOfGetter($oneOfFields = ["enumeratedShapes", "shapeRange"]), + set: $util.oneOfSetter($oneOfFields) + }); + + ArrayFeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ArrayFeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shape && message.shape.length)) + message.shape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shape.push(reader.int64()); + } else + message.shape.push(reader.int64()); + break; + case 2: + message.dataType = reader.int32(); + break; + case 21: + message.enumeratedShapes = $root.CoreML.Specification.ArrayFeatureType.EnumeratedShapes.decode(reader, reader.uint32()); + break; + case 31: + message.shapeRange = $root.CoreML.Specification.ArrayFeatureType.ShapeRange.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ArrayFeatureType.ArrayDataType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "INVALID_ARRAY_DATA_TYPE"] = 0; + values[valuesById[65568] = "FLOAT32"] = 65568; + values[valuesById[65600] = "DOUBLE"] = 65600; + values[valuesById[131104] = "INT32"] = 131104; + return values; + })(); + + ArrayFeatureType.Shape = (function() { + + function Shape(properties) { + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Shape.prototype.shape = $util.emptyArray; + + Shape.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ArrayFeatureType.Shape(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shape && message.shape.length)) + message.shape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shape.push(reader.int64()); + } else + message.shape.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Shape; + })(); + + ArrayFeatureType.EnumeratedShapes = (function() { + + function EnumeratedShapes(properties) { + this.shapes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EnumeratedShapes.prototype.shapes = $util.emptyArray; + + EnumeratedShapes.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ArrayFeatureType.EnumeratedShapes(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shapes && message.shapes.length)) + message.shapes = []; + message.shapes.push($root.CoreML.Specification.ArrayFeatureType.Shape.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return EnumeratedShapes; + })(); + + ArrayFeatureType.ShapeRange = (function() { + + function ShapeRange(properties) { + this.sizeRanges = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ShapeRange.prototype.sizeRanges = $util.emptyArray; + + ShapeRange.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ArrayFeatureType.ShapeRange(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.sizeRanges && message.sizeRanges.length)) + message.sizeRanges = []; + message.sizeRanges.push($root.CoreML.Specification.SizeRange.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ShapeRange; + })(); + + return ArrayFeatureType; + })(); + + Specification.DictionaryFeatureType = (function() { + + function DictionaryFeatureType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DictionaryFeatureType.prototype.int64KeyType = null; + DictionaryFeatureType.prototype.stringKeyType = null; + + var $oneOfFields; + + Object.defineProperty(DictionaryFeatureType.prototype, "KeyType", { + get: $util.oneOfGetter($oneOfFields = ["int64KeyType", "stringKeyType"]), + set: $util.oneOfSetter($oneOfFields) + }); + + DictionaryFeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.DictionaryFeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.int64KeyType = $root.CoreML.Specification.Int64FeatureType.decode(reader, reader.uint32()); + break; + case 2: + message.stringKeyType = $root.CoreML.Specification.StringFeatureType.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DictionaryFeatureType; + })(); + + Specification.SequenceFeatureType = (function() { + + function SequenceFeatureType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SequenceFeatureType.prototype.int64Type = null; + SequenceFeatureType.prototype.stringType = null; + SequenceFeatureType.prototype.sizeRange = null; + + var $oneOfFields; + + Object.defineProperty(SequenceFeatureType.prototype, "Type", { + get: $util.oneOfGetter($oneOfFields = ["int64Type", "stringType"]), + set: $util.oneOfSetter($oneOfFields) + }); + + SequenceFeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SequenceFeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.int64Type = $root.CoreML.Specification.Int64FeatureType.decode(reader, reader.uint32()); + break; + case 3: + message.stringType = $root.CoreML.Specification.StringFeatureType.decode(reader, reader.uint32()); + break; + case 101: + message.sizeRange = $root.CoreML.Specification.SizeRange.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SequenceFeatureType; + })(); + + Specification.FeatureType = (function() { + + function FeatureType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FeatureType.prototype.int64Type = null; + FeatureType.prototype.doubleType = null; + FeatureType.prototype.stringType = null; + FeatureType.prototype.imageType = null; + FeatureType.prototype.multiArrayType = null; + FeatureType.prototype.dictionaryType = null; + FeatureType.prototype.sequenceType = null; + FeatureType.prototype.isOptional = false; + + var $oneOfFields; + + Object.defineProperty(FeatureType.prototype, "Type", { + get: $util.oneOfGetter($oneOfFields = ["int64Type", "doubleType", "stringType", "imageType", "multiArrayType", "dictionaryType", "sequenceType"]), + set: $util.oneOfSetter($oneOfFields) + }); + + FeatureType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.FeatureType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.int64Type = $root.CoreML.Specification.Int64FeatureType.decode(reader, reader.uint32()); + break; + case 2: + message.doubleType = $root.CoreML.Specification.DoubleFeatureType.decode(reader, reader.uint32()); + break; + case 3: + message.stringType = $root.CoreML.Specification.StringFeatureType.decode(reader, reader.uint32()); + break; + case 4: + message.imageType = $root.CoreML.Specification.ImageFeatureType.decode(reader, reader.uint32()); + break; + case 5: + message.multiArrayType = $root.CoreML.Specification.ArrayFeatureType.decode(reader, reader.uint32()); + break; + case 6: + message.dictionaryType = $root.CoreML.Specification.DictionaryFeatureType.decode(reader, reader.uint32()); + break; + case 7: + message.sequenceType = $root.CoreML.Specification.SequenceFeatureType.decode(reader, reader.uint32()); + break; + case 1000: + message.isOptional = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FeatureType; + })(); + + Specification.ArrayFeatureExtractor = (function() { + + function ArrayFeatureExtractor(properties) { + this.extractIndex = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArrayFeatureExtractor.prototype.extractIndex = $util.emptyArray; + + ArrayFeatureExtractor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ArrayFeatureExtractor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.extractIndex && message.extractIndex.length)) + message.extractIndex = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.extractIndex.push(reader.uint64()); + } else + message.extractIndex.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ArrayFeatureExtractor; + })(); + + Specification.BayesianProbitRegressor = (function() { + + function BayesianProbitRegressor(properties) { + this.features = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BayesianProbitRegressor.prototype.numberOfFeatures = 0; + BayesianProbitRegressor.prototype.bias = null; + BayesianProbitRegressor.prototype.features = $util.emptyArray; + BayesianProbitRegressor.prototype.regressionInputFeatureName = ""; + BayesianProbitRegressor.prototype.optimismInputFeatureName = ""; + BayesianProbitRegressor.prototype.samplingScaleInputFeatureName = ""; + BayesianProbitRegressor.prototype.samplingTruncationInputFeatureName = ""; + BayesianProbitRegressor.prototype.meanOutputFeatureName = ""; + BayesianProbitRegressor.prototype.varianceOutputFeatureName = ""; + BayesianProbitRegressor.prototype.pessimisticProbabilityOutputFeatureName = ""; + BayesianProbitRegressor.prototype.sampledProbabilityOutputFeatureName = ""; + + BayesianProbitRegressor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BayesianProbitRegressor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numberOfFeatures = reader.uint32(); + break; + case 2: + message.bias = $root.CoreML.Specification.BayesianProbitRegressor.Gaussian.decode(reader, reader.uint32()); + break; + case 3: + if (!(message.features && message.features.length)) + message.features = []; + message.features.push($root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight.decode(reader, reader.uint32())); + break; + case 10: + message.regressionInputFeatureName = reader.string(); + break; + case 11: + message.optimismInputFeatureName = reader.string(); + break; + case 12: + message.samplingScaleInputFeatureName = reader.string(); + break; + case 13: + message.samplingTruncationInputFeatureName = reader.string(); + break; + case 20: + message.meanOutputFeatureName = reader.string(); + break; + case 21: + message.varianceOutputFeatureName = reader.string(); + break; + case 22: + message.pessimisticProbabilityOutputFeatureName = reader.string(); + break; + case 23: + message.sampledProbabilityOutputFeatureName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BayesianProbitRegressor.Gaussian = (function() { + + function Gaussian(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Gaussian.prototype.mean = 0; + Gaussian.prototype.precision = 0; + + Gaussian.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BayesianProbitRegressor.Gaussian(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mean = reader.double(); + break; + case 2: + message.precision = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Gaussian; + })(); + + BayesianProbitRegressor.FeatureValueWeight = (function() { + + function FeatureValueWeight(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FeatureValueWeight.prototype.featureValue = 0; + FeatureValueWeight.prototype.featureWeight = null; + + FeatureValueWeight.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.featureValue = reader.uint32(); + break; + case 2: + message.featureWeight = $root.CoreML.Specification.BayesianProbitRegressor.Gaussian.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FeatureValueWeight; + })(); + + BayesianProbitRegressor.FeatureWeight = (function() { + + function FeatureWeight(properties) { + this.weights = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FeatureWeight.prototype.featureId = 0; + FeatureWeight.prototype.weights = $util.emptyArray; + + FeatureWeight.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); 
+ var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.BayesianProbitRegressor.FeatureWeight(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.featureId = reader.uint32(); + break; + case 2: + if (!(message.weights && message.weights.length)) + message.weights = []; + message.weights.push($root.CoreML.Specification.BayesianProbitRegressor.FeatureValueWeight.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FeatureWeight; + })(); + + return BayesianProbitRegressor; + })(); + + Specification.CategoricalMapping = (function() { + + function CategoricalMapping(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CategoricalMapping.prototype.stringToInt64Map = null; + CategoricalMapping.prototype.int64ToStringMap = null; + CategoricalMapping.prototype.strValue = ""; + CategoricalMapping.prototype.int64Value = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + var $oneOfFields; + + Object.defineProperty(CategoricalMapping.prototype, "MappingType", { + get: $util.oneOfGetter($oneOfFields = ["stringToInt64Map", "int64ToStringMap"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Object.defineProperty(CategoricalMapping.prototype, "ValueOnUnknown", { + get: $util.oneOfGetter($oneOfFields = ["strValue", "int64Value"]), + set: $util.oneOfSetter($oneOfFields) + }); + + CategoricalMapping.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CategoricalMapping(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringToInt64Map = $root.CoreML.Specification.StringToInt64Map.decode(reader, reader.uint32()); + break; + case 2: + message.int64ToStringMap = $root.CoreML.Specification.Int64ToStringMap.decode(reader, reader.uint32()); + break; + case 101: + message.strValue = reader.string(); + break; + case 102: + message.int64Value = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CategoricalMapping; + })(); + + Specification.CustomModel = (function() { + + function CustomModel(properties) { + this.parameters = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CustomModel.prototype.className = ""; + CustomModel.prototype.parameters = $util.emptyObject; + CustomModel.prototype.description = ""; + + CustomModel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CustomModel(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.className = reader.string(); + break; + case 30: + reader.skip().pos++; + if (message.parameters === $util.emptyObject) + message.parameters = {}; + key = reader.string(); + reader.pos++; + message.parameters[key] = $root.CoreML.Specification.CustomModel.CustomModelParamValue.decode(reader, reader.uint32()); + break; + case 40: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + CustomModel.CustomModelParamValue = (function() { + + function CustomModelParamValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CustomModelParamValue.prototype.doubleValue = 0; + CustomModelParamValue.prototype.stringValue = ""; + CustomModelParamValue.prototype.intValue = 0; + CustomModelParamValue.prototype.longValue = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + CustomModelParamValue.prototype.boolValue = false; + CustomModelParamValue.prototype.bytesValue = $util.newBuffer([]); + + var $oneOfFields; + + Object.defineProperty(CustomModelParamValue.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["doubleValue", "stringValue", "intValue", "longValue", "boolValue", "bytesValue"]), + set: $util.oneOfSetter($oneOfFields) + }); + + CustomModelParamValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CustomModel.CustomModelParamValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.doubleValue = reader.double(); + break; + case 20: + message.stringValue = reader.string(); + break; + case 30: + message.intValue = reader.int32(); + break; + case 40: + message.longValue = reader.int64(); + break; + case 50: + message.boolValue = reader.bool(); + break; + case 60: + message.bytesValue = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CustomModelParamValue; + })(); + + return CustomModel; + })(); + + Specification.DictVectorizer = (function() { + + function DictVectorizer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DictVectorizer.prototype.stringToIndex = null; + DictVectorizer.prototype.int64ToIndex = null; + + var $oneOfFields; + + Object.defineProperty(DictVectorizer.prototype, "Map", { + get: $util.oneOfGetter($oneOfFields = ["stringToIndex", "int64ToIndex"]), + set: $util.oneOfSetter($oneOfFields) + }); + + DictVectorizer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.DictVectorizer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringToIndex = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 2: + message.int64ToIndex = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DictVectorizer; + })(); + + Specification.FeatureVectorizer = (function() { + + function FeatureVectorizer(properties) { + this.inputList = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FeatureVectorizer.prototype.inputList = $util.emptyArray; + + FeatureVectorizer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FeatureVectorizer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.inputList && message.inputList.length)) + message.inputList = []; + message.inputList.push($root.CoreML.Specification.FeatureVectorizer.InputColumn.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FeatureVectorizer.InputColumn = (function() { + + function InputColumn(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + InputColumn.prototype.inputColumn = ""; + InputColumn.prototype.inputDimensions = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + + InputColumn.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FeatureVectorizer.InputColumn(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputColumn = reader.string(); + break; + case 2: + message.inputDimensions = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return InputColumn; + })(); + + return FeatureVectorizer; + })(); + + Specification.GLMRegressor = (function() { + + function GLMRegressor(properties) { + this.weights = []; + this.offset = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GLMRegressor.prototype.weights = $util.emptyArray; + GLMRegressor.prototype.offset = $util.emptyArray; + GLMRegressor.prototype.postEvaluationTransform = 0; + + GLMRegressor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.GLMRegressor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.weights && message.weights.length)) + message.weights = []; + message.weights.push($root.CoreML.Specification.GLMRegressor.DoubleArray.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.offset && message.offset.length)) + message.offset = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.offset.push(reader.double()); + } else + message.offset.push(reader.double()); + break; + case 3: + message.postEvaluationTransform = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + GLMRegressor.DoubleArray = (function() { + + function DoubleArray(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DoubleArray.prototype.value = $util.emptyArray; + + DoubleArray.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.GLMRegressor.DoubleArray(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.value.push(reader.double()); + } else + message.value.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DoubleArray; + })(); + + GLMRegressor.PostEvaluationTransform = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NoTransform"] = 0; + values[valuesById[1] = "Logit"] = 1; + values[valuesById[2] = "Probit"] = 2; + return values; + })(); + + return GLMRegressor; + })(); + + Specification.GLMClassifier = (function() { + + function GLMClassifier(properties) { + this.weights = []; + this.offset = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GLMClassifier.prototype.weights = $util.emptyArray; + GLMClassifier.prototype.offset = $util.emptyArray; + GLMClassifier.prototype.postEvaluationTransform = 0; + GLMClassifier.prototype.classEncoding = 0; + GLMClassifier.prototype.stringClassLabels = null; + GLMClassifier.prototype.int64ClassLabels = null; + + var $oneOfFields; + + Object.defineProperty(GLMClassifier.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels", "int64ClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + GLMClassifier.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.GLMClassifier(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.weights && message.weights.length)) + message.weights = []; + message.weights.push($root.CoreML.Specification.GLMClassifier.DoubleArray.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.offset && message.offset.length)) + message.offset = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.offset.push(reader.double()); + } else + message.offset.push(reader.double()); + break; + case 3: + message.postEvaluationTransform = reader.int32(); + break; + case 4: + message.classEncoding = reader.int32(); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + GLMClassifier.DoubleArray = (function() { + + function DoubleArray(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DoubleArray.prototype.value = $util.emptyArray; + + DoubleArray.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.GLMClassifier.DoubleArray(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.value.push(reader.double()); + } else + message.value.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DoubleArray; + })(); + + GLMClassifier.PostEvaluationTransform = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "Logit"] = 0; + values[valuesById[1] = "Probit"] = 1; + return values; + })(); + + GLMClassifier.ClassEncoding = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "ReferenceClass"] = 0; + values[valuesById[1] = "OneVsRest"] = 1; + return values; + })(); + + return GLMClassifier; + })(); + + Specification.KNearestNeighborsClassifier = (function() { + + function KNearestNeighborsClassifier(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + KNearestNeighborsClassifier.prototype.nearestNeighborsIndex = null; + KNearestNeighborsClassifier.prototype.numberOfNeighbors = null; + KNearestNeighborsClassifier.prototype.stringClassLabels = null; + KNearestNeighborsClassifier.prototype.int64ClassLabels = null; + KNearestNeighborsClassifier.prototype.defaultStringLabel = ""; + KNearestNeighborsClassifier.prototype.defaultInt64Label = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + KNearestNeighborsClassifier.prototype.uniformWeighting = null; + KNearestNeighborsClassifier.prototype.inverseDistanceWeighting = null; + + var $oneOfFields; + + Object.defineProperty(KNearestNeighborsClassifier.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels", "int64ClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Object.defineProperty(KNearestNeighborsClassifier.prototype, "DefaultClassLabel", { + get: $util.oneOfGetter($oneOfFields = ["defaultStringLabel", "defaultInt64Label"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Object.defineProperty(KNearestNeighborsClassifier.prototype, "WeightingScheme", { + get: $util.oneOfGetter($oneOfFields = ["uniformWeighting", "inverseDistanceWeighting"]), + set: $util.oneOfSetter($oneOfFields) + }); + + KNearestNeighborsClassifier.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.KNearestNeighborsClassifier(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nearestNeighborsIndex = $root.CoreML.Specification.NearestNeighborsIndex.decode(reader, reader.uint32()); + break; + case 3: + message.numberOfNeighbors = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 110: + message.defaultStringLabel = reader.string(); + break; + case 111: + message.defaultInt64Label = reader.int64(); + break; + case 200: + message.uniformWeighting = $root.CoreML.Specification.UniformWeighting.decode(reader, reader.uint32()); + break; + case 210: + message.inverseDistanceWeighting = $root.CoreML.Specification.InverseDistanceWeighting.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return KNearestNeighborsClassifier; + })(); + + Specification.NearestNeighborsIndex = (function() { + + function NearestNeighborsIndex(properties) { + this.floatSamples = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NearestNeighborsIndex.prototype.numberOfDimensions = 0; + NearestNeighborsIndex.prototype.floatSamples = $util.emptyArray; + NearestNeighborsIndex.prototype.linearIndex = null; + NearestNeighborsIndex.prototype.singleKdTreeIndex = null; + NearestNeighborsIndex.prototype.squaredEuclideanDistance = null; + + var $oneOfFields; + + Object.defineProperty(NearestNeighborsIndex.prototype, "IndexType", { + get: $util.oneOfGetter($oneOfFields = ["linearIndex", 
"singleKdTreeIndex"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Object.defineProperty(NearestNeighborsIndex.prototype, "DistanceFunction", { + get: $util.oneOfGetter($oneOfFields = ["squaredEuclideanDistance"]), + set: $util.oneOfSetter($oneOfFields) + }); + + NearestNeighborsIndex.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.NearestNeighborsIndex(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numberOfDimensions = reader.int32(); + break; + case 2: + if (!(message.floatSamples && message.floatSamples.length)) + message.floatSamples = []; + message.floatSamples.push($root.CoreML.Specification.FloatVector.decode(reader, reader.uint32())); + break; + case 100: + message.linearIndex = $root.CoreML.Specification.LinearIndex.decode(reader, reader.uint32()); + break; + case 110: + message.singleKdTreeIndex = $root.CoreML.Specification.SingleKdTreeIndex.decode(reader, reader.uint32()); + break; + case 200: + message.squaredEuclideanDistance = $root.CoreML.Specification.SquaredEuclideanDistance.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NearestNeighborsIndex; + })(); + + Specification.UniformWeighting = (function() { + + function UniformWeighting(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + UniformWeighting.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.UniformWeighting(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return UniformWeighting; + })(); + + Specification.InverseDistanceWeighting = (function() { + + function InverseDistanceWeighting(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + InverseDistanceWeighting.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.InverseDistanceWeighting(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return InverseDistanceWeighting; + })(); + + Specification.LinearIndex = (function() { + + function LinearIndex(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LinearIndex.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LinearIndex(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LinearIndex; + })(); + + Specification.SingleKdTreeIndex = (function() { + + function SingleKdTreeIndex(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SingleKdTreeIndex.prototype.leafSize = 0; + + SingleKdTreeIndex.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SingleKdTreeIndex(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.leafSize = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SingleKdTreeIndex; + })(); + + Specification.SquaredEuclideanDistance = (function() { + + function SquaredEuclideanDistance(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SquaredEuclideanDistance.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SquaredEuclideanDistance(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SquaredEuclideanDistance; + })(); + + Specification.Int64Parameter = (function() { + + function Int64Parameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64Parameter.prototype.defaultValue = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Int64Parameter.prototype.range = null; + Int64Parameter.prototype.set = null; + + var $oneOfFields; + + Object.defineProperty(Int64Parameter.prototype, "AllowedValues", { + get: $util.oneOfGetter($oneOfFields = ["range", "set"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Int64Parameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Int64Parameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.int64(); + break; + case 10: + message.range = $root.CoreML.Specification.Int64Range.decode(reader, reader.uint32()); + break; + case 11: + message.set = $root.CoreML.Specification.Int64Set.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Int64Parameter; + })(); + + Specification.DoubleParameter = (function() { + + function DoubleParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DoubleParameter.prototype.defaultValue = 0; + DoubleParameter.prototype.range = null; + + var $oneOfFields; + + Object.defineProperty(DoubleParameter.prototype, "AllowedValues", { + get: $util.oneOfGetter($oneOfFields = ["range"]), + set: $util.oneOfSetter($oneOfFields) + }); + + DoubleParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.DoubleParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.double(); + break; + case 10: + message.range = $root.CoreML.Specification.DoubleRange.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DoubleParameter; + })(); + + Specification.StringParameter = (function() { + + function StringParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StringParameter.prototype.defaultValue = ""; + + StringParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.StringParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return StringParameter; + })(); + + Specification.BoolParameter = (function() { + + function BoolParameter(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BoolParameter.prototype.defaultValue = false; + + BoolParameter.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BoolParameter(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.defaultValue = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BoolParameter; + })(); + + Specification.Identity = (function() { + + function Identity(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Identity.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.Identity(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Identity; + })(); + + Specification.Imputer = (function() { + + function Imputer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Imputer.prototype.imputedDoubleValue = 0; + Imputer.prototype.imputedInt64Value = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Imputer.prototype.imputedStringValue = ""; + Imputer.prototype.imputedDoubleArray = null; + Imputer.prototype.imputedInt64Array = null; + Imputer.prototype.imputedStringDictionary = null; + Imputer.prototype.imputedInt64Dictionary = null; + Imputer.prototype.replaceDoubleValue = 0; + Imputer.prototype.replaceInt64Value = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + Imputer.prototype.replaceStringValue = ""; + + var $oneOfFields; + + Object.defineProperty(Imputer.prototype, "ImputedValue", { + get: $util.oneOfGetter($oneOfFields = ["imputedDoubleValue", "imputedInt64Value", "imputedStringValue", "imputedDoubleArray", "imputedInt64Array", "imputedStringDictionary", "imputedInt64Dictionary"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Object.defineProperty(Imputer.prototype, "ReplaceValue", { + get: $util.oneOfGetter($oneOfFields = ["replaceDoubleValue", "replaceInt64Value", "replaceStringValue"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Imputer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.Imputer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.imputedDoubleValue = reader.double(); + break; + case 2: + message.imputedInt64Value = reader.int64(); + break; + case 3: + message.imputedStringValue = reader.string(); + break; + case 4: + message.imputedDoubleArray = $root.CoreML.Specification.DoubleVector.decode(reader, reader.uint32()); + break; + case 5: + message.imputedInt64Array = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 6: + message.imputedStringDictionary = $root.CoreML.Specification.StringToDoubleMap.decode(reader, reader.uint32()); + break; + case 7: + message.imputedInt64Dictionary = $root.CoreML.Specification.Int64ToDoubleMap.decode(reader, reader.uint32()); + break; + case 11: + message.replaceDoubleValue = reader.double(); + break; + case 12: + message.replaceInt64Value = reader.int64(); + break; + case 13: + message.replaceStringValue = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Imputer; + })(); + + 
Specification.NeuralNetworkMultiArrayShapeMapping = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "RANK5_ARRAY_MAPPING"] = 0; + values[valuesById[1] = "EXACT_ARRAY_MAPPING"] = 1; + return values; + })(); + + Specification.NeuralNetworkImageShapeMapping = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "RANK5_IMAGE_MAPPING"] = 0; + values[valuesById[1] = "RANK4_IMAGE_MAPPING"] = 1; + return values; + })(); + + Specification.NeuralNetwork = (function() { + + function NeuralNetwork(properties) { + this.layers = []; + this.preprocessing = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NeuralNetwork.prototype.layers = $util.emptyArray; + NeuralNetwork.prototype.preprocessing = $util.emptyArray; + NeuralNetwork.prototype.arrayInputShapeMapping = 0; + NeuralNetwork.prototype.imageInputShapeMapping = 0; + NeuralNetwork.prototype.updateParams = null; + + NeuralNetwork.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NeuralNetwork(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.layers && message.layers.length)) + message.layers = []; + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.preprocessing && message.preprocessing.length)) + message.preprocessing = []; + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decode(reader, reader.uint32())); + break; + case 5: + message.arrayInputShapeMapping = reader.int32(); + break; + case 6: + message.imageInputShapeMapping = reader.int32(); + break; + case 10: + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NeuralNetwork; + })(); + + Specification.NeuralNetworkImageScaler = (function() { + + function NeuralNetworkImageScaler(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NeuralNetworkImageScaler.prototype.channelScale = 0; + NeuralNetworkImageScaler.prototype.blueBias = 0; + NeuralNetworkImageScaler.prototype.greenBias = 0; + NeuralNetworkImageScaler.prototype.redBias = 0; + NeuralNetworkImageScaler.prototype.grayBias = 0; + + NeuralNetworkImageScaler.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NeuralNetworkImageScaler(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.channelScale = reader.float(); + break; + case 20: + message.blueBias = reader.float(); + break; + case 21: + message.greenBias = reader.float(); + break; + case 22: + message.redBias = reader.float(); + break; + case 30: + message.grayBias = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NeuralNetworkImageScaler; + })(); + + Specification.NeuralNetworkMeanImage = (function() { + + function NeuralNetworkMeanImage(properties) { + this.meanImage = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NeuralNetworkMeanImage.prototype.meanImage = $util.emptyArray; + + NeuralNetworkMeanImage.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NeuralNetworkMeanImage(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.meanImage && message.meanImage.length)) + message.meanImage = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.meanImage.push(reader.float()); + } else + message.meanImage.push(reader.float()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NeuralNetworkMeanImage; + })(); + + Specification.NeuralNetworkPreprocessing = (function() { + + function NeuralNetworkPreprocessing(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NeuralNetworkPreprocessing.prototype.featureName = ""; + NeuralNetworkPreprocessing.prototype.scaler = null; + NeuralNetworkPreprocessing.prototype.meanImage = null; + + var $oneOfFields; + + Object.defineProperty(NeuralNetworkPreprocessing.prototype, "preprocessor", { + get: $util.oneOfGetter($oneOfFields = ["scaler", "meanImage"]), + set: $util.oneOfSetter($oneOfFields) + }); + + NeuralNetworkPreprocessing.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NeuralNetworkPreprocessing(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.featureName = reader.string(); + break; + case 10: + message.scaler = $root.CoreML.Specification.NeuralNetworkImageScaler.decode(reader, reader.uint32()); + break; + case 11: + message.meanImage = $root.CoreML.Specification.NeuralNetworkMeanImage.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NeuralNetworkPreprocessing; + })(); + + Specification.ActivationReLU = (function() { + + function ActivationReLU(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationReLU.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationReLU(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationReLU; + })(); + + Specification.ActivationLeakyReLU = (function() { + + function ActivationLeakyReLU(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationLeakyReLU.prototype.alpha = 0; + + ActivationLeakyReLU.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationLeakyReLU(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationLeakyReLU; + })(); + + Specification.ActivationTanh = (function() { + + function ActivationTanh(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationTanh.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationTanh(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationTanh; + })(); + + Specification.ActivationScaledTanh = (function() { + + function ActivationScaledTanh(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationScaledTanh.prototype.alpha = 0; + ActivationScaledTanh.prototype.beta = 0; + + ActivationScaledTanh.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationScaledTanh(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationScaledTanh; + })(); + + Specification.ActivationSigmoid = (function() { + + function ActivationSigmoid(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationSigmoid.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationSigmoid(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationSigmoid; + })(); + + Specification.ActivationLinear = (function() { + + function ActivationLinear(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationLinear.prototype.alpha = 0; + ActivationLinear.prototype.beta = 0; + + ActivationLinear.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationLinear(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationLinear; + })(); + + Specification.ActivationSigmoidHard = (function() { + + function ActivationSigmoidHard(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationSigmoidHard.prototype.alpha = 0; + ActivationSigmoidHard.prototype.beta = 0; + + ActivationSigmoidHard.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationSigmoidHard(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationSigmoidHard; + })(); + + Specification.ActivationPReLU = (function() { + + function ActivationPReLU(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationPReLU.prototype.alpha = null; + + ActivationPReLU.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationPReLU(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationPReLU; + })(); + + Specification.ActivationELU = (function() { + + function ActivationELU(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationELU.prototype.alpha = 0; + + ActivationELU.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationELU(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationELU; + })(); + + Specification.ActivationThresholdedReLU = (function() { + + function ActivationThresholdedReLU(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationThresholdedReLU.prototype.alpha = 0; + + ActivationThresholdedReLU.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationThresholdedReLU(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationThresholdedReLU; + })(); + + Specification.ActivationSoftsign = (function() { + + function ActivationSoftsign(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationSoftsign.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationSoftsign(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationSoftsign; + })(); + + Specification.ActivationSoftplus = (function() { + + function ActivationSoftplus(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationSoftplus.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationSoftplus(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationSoftplus; + })(); + + Specification.ActivationParametricSoftplus = (function() { + + function ActivationParametricSoftplus(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationParametricSoftplus.prototype.alpha = null; + ActivationParametricSoftplus.prototype.beta = null; + + ActivationParametricSoftplus.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationParametricSoftplus(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 2: + message.beta = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationParametricSoftplus; + })(); + + Specification.ActivationParams = (function() { + + function ActivationParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ActivationParams.prototype.linear = null; + ActivationParams.prototype.ReLU = null; + ActivationParams.prototype.leakyReLU = null; + ActivationParams.prototype.thresholdedReLU = null; + ActivationParams.prototype.PReLU = null; + ActivationParams.prototype.tanh = null; + ActivationParams.prototype.scaledTanh = null; + 
ActivationParams.prototype.sigmoid = null; + ActivationParams.prototype.sigmoidHard = null; + ActivationParams.prototype.ELU = null; + ActivationParams.prototype.softsign = null; + ActivationParams.prototype.softplus = null; + ActivationParams.prototype.parametricSoftplus = null; + + var $oneOfFields; + + Object.defineProperty(ActivationParams.prototype, "NonlinearityType", { + get: $util.oneOfGetter($oneOfFields = ["linear", "ReLU", "leakyReLU", "thresholdedReLU", "PReLU", "tanh", "scaledTanh", "sigmoid", "sigmoidHard", "ELU", "softsign", "softplus", "parametricSoftplus"]), + set: $util.oneOfSetter($oneOfFields) + }); + + ActivationParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ActivationParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 5: + message.linear = $root.CoreML.Specification.ActivationLinear.decode(reader, reader.uint32()); + break; + case 10: + message.ReLU = $root.CoreML.Specification.ActivationReLU.decode(reader, reader.uint32()); + break; + case 15: + message.leakyReLU = $root.CoreML.Specification.ActivationLeakyReLU.decode(reader, reader.uint32()); + break; + case 20: + message.thresholdedReLU = $root.CoreML.Specification.ActivationThresholdedReLU.decode(reader, reader.uint32()); + break; + case 25: + message.PReLU = $root.CoreML.Specification.ActivationPReLU.decode(reader, reader.uint32()); + break; + case 30: + message.tanh = $root.CoreML.Specification.ActivationTanh.decode(reader, reader.uint32()); + break; + case 31: + message.scaledTanh = $root.CoreML.Specification.ActivationScaledTanh.decode(reader, reader.uint32()); + break; + case 40: + message.sigmoid = $root.CoreML.Specification.ActivationSigmoid.decode(reader, reader.uint32()); + break; + case 41: + message.sigmoidHard = 
$root.CoreML.Specification.ActivationSigmoidHard.decode(reader, reader.uint32()); + break; + case 50: + message.ELU = $root.CoreML.Specification.ActivationELU.decode(reader, reader.uint32()); + break; + case 60: + message.softsign = $root.CoreML.Specification.ActivationSoftsign.decode(reader, reader.uint32()); + break; + case 70: + message.softplus = $root.CoreML.Specification.ActivationSoftplus.decode(reader, reader.uint32()); + break; + case 71: + message.parametricSoftplus = $root.CoreML.Specification.ActivationParametricSoftplus.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ActivationParams; + })(); + + Specification.Tensor = (function() { + + function Tensor(properties) { + this.dimValue = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Tensor.prototype.rank = 0; + Tensor.prototype.dimValue = $util.emptyArray; + + Tensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Tensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.rank = reader.uint32(); + break; + case 2: + if (!(message.dimValue && message.dimValue.length)) + message.dimValue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dimValue.push(reader.int64()); + } else + message.dimValue.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Tensor; + })(); + + Specification.NeuralNetworkLayer = (function() { + + function NeuralNetworkLayer(properties) { + this.input = []; + this.output = []; + this.inputTensor = []; + this.outputTensor = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NeuralNetworkLayer.prototype.name = ""; + NeuralNetworkLayer.prototype.input = $util.emptyArray; + NeuralNetworkLayer.prototype.output = $util.emptyArray; + NeuralNetworkLayer.prototype.inputTensor = $util.emptyArray; + NeuralNetworkLayer.prototype.outputTensor = $util.emptyArray; + NeuralNetworkLayer.prototype.isUpdatable = false; + NeuralNetworkLayer.prototype.convolution = null; + NeuralNetworkLayer.prototype.pooling = null; + NeuralNetworkLayer.prototype.activation = null; + NeuralNetworkLayer.prototype.innerProduct = null; + NeuralNetworkLayer.prototype.embedding = null; + NeuralNetworkLayer.prototype.batchnorm = null; + NeuralNetworkLayer.prototype.mvn = null; + NeuralNetworkLayer.prototype.l2normalize = null; + NeuralNetworkLayer.prototype.softmax = null; + NeuralNetworkLayer.prototype.lrn = null; + NeuralNetworkLayer.prototype.crop = null; + NeuralNetworkLayer.prototype.padding = null; + NeuralNetworkLayer.prototype.upsample = null; + NeuralNetworkLayer.prototype.resizeBilinear = null; + 
NeuralNetworkLayer.prototype.cropResize = null; + NeuralNetworkLayer.prototype.unary = null; + NeuralNetworkLayer.prototype.add = null; + NeuralNetworkLayer.prototype.multiply = null; + NeuralNetworkLayer.prototype.average = null; + NeuralNetworkLayer.prototype.scale = null; + NeuralNetworkLayer.prototype.bias = null; + NeuralNetworkLayer.prototype.max = null; + NeuralNetworkLayer.prototype.min = null; + NeuralNetworkLayer.prototype.dot = null; + NeuralNetworkLayer.prototype.reduce = null; + NeuralNetworkLayer.prototype.loadConstant = null; + NeuralNetworkLayer.prototype.reshape = null; + NeuralNetworkLayer.prototype.flatten = null; + NeuralNetworkLayer.prototype.permute = null; + NeuralNetworkLayer.prototype.concat = null; + NeuralNetworkLayer.prototype.split = null; + NeuralNetworkLayer.prototype.sequenceRepeat = null; + NeuralNetworkLayer.prototype.reorganizeData = null; + NeuralNetworkLayer.prototype.slice = null; + NeuralNetworkLayer.prototype.simpleRecurrent = null; + NeuralNetworkLayer.prototype.gru = null; + NeuralNetworkLayer.prototype.uniDirectionalLSTM = null; + NeuralNetworkLayer.prototype.biDirectionalLSTM = null; + NeuralNetworkLayer.prototype.custom = null; + NeuralNetworkLayer.prototype.copy = null; + NeuralNetworkLayer.prototype.branch = null; + NeuralNetworkLayer.prototype.loop = null; + NeuralNetworkLayer.prototype.loopBreak = null; + NeuralNetworkLayer.prototype.loopContinue = null; + NeuralNetworkLayer.prototype.rangeStatic = null; + NeuralNetworkLayer.prototype.rangeDynamic = null; + NeuralNetworkLayer.prototype.clip = null; + NeuralNetworkLayer.prototype.ceil = null; + NeuralNetworkLayer.prototype.floor = null; + NeuralNetworkLayer.prototype.sign = null; + NeuralNetworkLayer.prototype.round = null; + NeuralNetworkLayer.prototype.exp2 = null; + NeuralNetworkLayer.prototype.sin = null; + NeuralNetworkLayer.prototype.cos = null; + NeuralNetworkLayer.prototype.tan = null; + NeuralNetworkLayer.prototype.asin = null; + 
NeuralNetworkLayer.prototype.acos = null; + NeuralNetworkLayer.prototype.atan = null; + NeuralNetworkLayer.prototype.sinh = null; + NeuralNetworkLayer.prototype.cosh = null; + NeuralNetworkLayer.prototype.tanh = null; + NeuralNetworkLayer.prototype.asinh = null; + NeuralNetworkLayer.prototype.acosh = null; + NeuralNetworkLayer.prototype.atanh = null; + NeuralNetworkLayer.prototype.erf = null; + NeuralNetworkLayer.prototype.gelu = null; + NeuralNetworkLayer.prototype.equal = null; + NeuralNetworkLayer.prototype.notEqual = null; + NeuralNetworkLayer.prototype.lessThan = null; + NeuralNetworkLayer.prototype.lessEqual = null; + NeuralNetworkLayer.prototype.greaterThan = null; + NeuralNetworkLayer.prototype.greaterEqual = null; + NeuralNetworkLayer.prototype.logicalOr = null; + NeuralNetworkLayer.prototype.logicalXor = null; + NeuralNetworkLayer.prototype.logicalNot = null; + NeuralNetworkLayer.prototype.logicalAnd = null; + NeuralNetworkLayer.prototype.modBroadcastable = null; + NeuralNetworkLayer.prototype.minBroadcastable = null; + NeuralNetworkLayer.prototype.maxBroadcastable = null; + NeuralNetworkLayer.prototype.addBroadcastable = null; + NeuralNetworkLayer.prototype.powBroadcastable = null; + NeuralNetworkLayer.prototype.divideBroadcastable = null; + NeuralNetworkLayer.prototype.floorDivBroadcastable = null; + NeuralNetworkLayer.prototype.multiplyBroadcastable = null; + NeuralNetworkLayer.prototype.subtractBroadcastable = null; + NeuralNetworkLayer.prototype.tile = null; + NeuralNetworkLayer.prototype.stack = null; + NeuralNetworkLayer.prototype.gather = null; + NeuralNetworkLayer.prototype.scatter = null; + NeuralNetworkLayer.prototype.gatherND = null; + NeuralNetworkLayer.prototype.scatterND = null; + NeuralNetworkLayer.prototype.softmaxND = null; + NeuralNetworkLayer.prototype.gatherAlongAxis = null; + NeuralNetworkLayer.prototype.scatterAlongAxis = null; + NeuralNetworkLayer.prototype.reverse = null; + NeuralNetworkLayer.prototype.reverseSeq = null; + 
NeuralNetworkLayer.prototype.splitND = null; + NeuralNetworkLayer.prototype.concatND = null; + NeuralNetworkLayer.prototype.transpose = null; + NeuralNetworkLayer.prototype.sliceStatic = null; + NeuralNetworkLayer.prototype.sliceDynamic = null; + NeuralNetworkLayer.prototype.slidingWindows = null; + NeuralNetworkLayer.prototype.topK = null; + NeuralNetworkLayer.prototype.argMin = null; + NeuralNetworkLayer.prototype.argMax = null; + NeuralNetworkLayer.prototype.embeddingND = null; + NeuralNetworkLayer.prototype.batchedMatmul = null; + NeuralNetworkLayer.prototype.getShape = null; + NeuralNetworkLayer.prototype.loadConstantND = null; + NeuralNetworkLayer.prototype.fillLike = null; + NeuralNetworkLayer.prototype.fillStatic = null; + NeuralNetworkLayer.prototype.fillDynamic = null; + NeuralNetworkLayer.prototype.broadcastToLike = null; + NeuralNetworkLayer.prototype.broadcastToStatic = null; + NeuralNetworkLayer.prototype.broadcastToDynamic = null; + NeuralNetworkLayer.prototype.squeeze = null; + NeuralNetworkLayer.prototype.expandDims = null; + NeuralNetworkLayer.prototype.flattenTo2D = null; + NeuralNetworkLayer.prototype.reshapeLike = null; + NeuralNetworkLayer.prototype.reshapeStatic = null; + NeuralNetworkLayer.prototype.reshapeDynamic = null; + NeuralNetworkLayer.prototype.rankPreservingReshape = null; + NeuralNetworkLayer.prototype.constantPad = null; + NeuralNetworkLayer.prototype.randomNormalLike = null; + NeuralNetworkLayer.prototype.randomNormalStatic = null; + NeuralNetworkLayer.prototype.randomNormalDynamic = null; + NeuralNetworkLayer.prototype.randomUniformLike = null; + NeuralNetworkLayer.prototype.randomUniformStatic = null; + NeuralNetworkLayer.prototype.randomUniformDynamic = null; + NeuralNetworkLayer.prototype.randomBernoulliLike = null; + NeuralNetworkLayer.prototype.randomBernoulliStatic = null; + NeuralNetworkLayer.prototype.randomBernoulliDynamic = null; + NeuralNetworkLayer.prototype.categoricalDistribution = null; + 
NeuralNetworkLayer.prototype.reduceL1 = null; + NeuralNetworkLayer.prototype.reduceL2 = null; + NeuralNetworkLayer.prototype.reduceMax = null; + NeuralNetworkLayer.prototype.reduceMin = null; + NeuralNetworkLayer.prototype.reduceSum = null; + NeuralNetworkLayer.prototype.reduceProd = null; + NeuralNetworkLayer.prototype.reduceMean = null; + NeuralNetworkLayer.prototype.reduceLogSum = null; + NeuralNetworkLayer.prototype.reduceSumSquare = null; + NeuralNetworkLayer.prototype.reduceLogSumExp = null; + NeuralNetworkLayer.prototype.whereNonZero = null; + NeuralNetworkLayer.prototype.matrixBandPart = null; + NeuralNetworkLayer.prototype.lowerTriangular = null; + NeuralNetworkLayer.prototype.upperTriangular = null; + NeuralNetworkLayer.prototype.whereBroadcastable = null; + NeuralNetworkLayer.prototype.layerNormalization = null; + NeuralNetworkLayer.prototype.NonMaximumSuppression = null; + + var $oneOfFields; + + Object.defineProperty(NeuralNetworkLayer.prototype, "layer", { + get: $util.oneOfGetter($oneOfFields = ["convolution", "pooling", "activation", "innerProduct", "embedding", "batchnorm", "mvn", "l2normalize", "softmax", "lrn", "crop", "padding", "upsample", "resizeBilinear", "cropResize", "unary", "add", "multiply", "average", "scale", "bias", "max", "min", "dot", "reduce", "loadConstant", "reshape", "flatten", "permute", "concat", "split", "sequenceRepeat", "reorganizeData", "slice", "simpleRecurrent", "gru", "uniDirectionalLSTM", "biDirectionalLSTM", "custom", "copy", "branch", "loop", "loopBreak", "loopContinue", "rangeStatic", "rangeDynamic", "clip", "ceil", "floor", "sign", "round", "exp2", "sin", "cos", "tan", "asin", "acos", "atan", "sinh", "cosh", "tanh", "asinh", "acosh", "atanh", "erf", "gelu", "equal", "notEqual", "lessThan", "lessEqual", "greaterThan", "greaterEqual", "logicalOr", "logicalXor", "logicalNot", "logicalAnd", "modBroadcastable", "minBroadcastable", "maxBroadcastable", "addBroadcastable", "powBroadcastable", "divideBroadcastable", 
"floorDivBroadcastable", "multiplyBroadcastable", "subtractBroadcastable", "tile", "stack", "gather", "scatter", "gatherND", "scatterND", "softmaxND", "gatherAlongAxis", "scatterAlongAxis", "reverse", "reverseSeq", "splitND", "concatND", "transpose", "sliceStatic", "sliceDynamic", "slidingWindows", "topK", "argMin", "argMax", "embeddingND", "batchedMatmul", "getShape", "loadConstantND", "fillLike", "fillStatic", "fillDynamic", "broadcastToLike", "broadcastToStatic", "broadcastToDynamic", "squeeze", "expandDims", "flattenTo2D", "reshapeLike", "reshapeStatic", "reshapeDynamic", "rankPreservingReshape", "constantPad", "randomNormalLike", "randomNormalStatic", "randomNormalDynamic", "randomUniformLike", "randomUniformStatic", "randomUniformDynamic", "randomBernoulliLike", "randomBernoulliStatic", "randomBernoulliDynamic", "categoricalDistribution", "reduceL1", "reduceL2", "reduceMax", "reduceMin", "reduceSum", "reduceProd", "reduceMean", "reduceLogSum", "reduceSumSquare", "reduceLogSumExp", "whereNonZero", "matrixBandPart", "lowerTriangular", "upperTriangular", "whereBroadcastable", "layerNormalization", "NonMaximumSuppression"]), + set: $util.oneOfSetter($oneOfFields) + }); + + NeuralNetworkLayer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NeuralNetworkLayer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + case 3: + if (!(message.output && message.output.length)) + message.output = []; + message.output.push(reader.string()); + break; + case 4: + if (!(message.inputTensor && message.inputTensor.length)) + message.inputTensor = []; + message.inputTensor.push($root.CoreML.Specification.Tensor.decode(reader, reader.uint32())); + break; + case 5: + if (!(message.outputTensor && message.outputTensor.length)) + message.outputTensor = []; + message.outputTensor.push($root.CoreML.Specification.Tensor.decode(reader, reader.uint32())); + break; + case 10: + message.isUpdatable = reader.bool(); + break; + case 100: + message.convolution = $root.CoreML.Specification.ConvolutionLayerParams.decode(reader, reader.uint32()); + break; + case 120: + message.pooling = $root.CoreML.Specification.PoolingLayerParams.decode(reader, reader.uint32()); + break; + case 130: + message.activation = $root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32()); + break; + case 140: + message.innerProduct = $root.CoreML.Specification.InnerProductLayerParams.decode(reader, reader.uint32()); + break; + case 150: + message.embedding = $root.CoreML.Specification.EmbeddingLayerParams.decode(reader, reader.uint32()); + break; + case 160: + message.batchnorm = $root.CoreML.Specification.BatchnormLayerParams.decode(reader, reader.uint32()); + break; + case 165: + message.mvn = $root.CoreML.Specification.MeanVarianceNormalizeLayerParams.decode(reader, reader.uint32()); + break; + case 170: + message.l2normalize = $root.CoreML.Specification.L2NormalizeLayerParams.decode(reader, reader.uint32()); + break; + case 175: + message.softmax = 
$root.CoreML.Specification.SoftmaxLayerParams.decode(reader, reader.uint32()); + break; + case 180: + message.lrn = $root.CoreML.Specification.LRNLayerParams.decode(reader, reader.uint32()); + break; + case 190: + message.crop = $root.CoreML.Specification.CropLayerParams.decode(reader, reader.uint32()); + break; + case 200: + message.padding = $root.CoreML.Specification.PaddingLayerParams.decode(reader, reader.uint32()); + break; + case 210: + message.upsample = $root.CoreML.Specification.UpsampleLayerParams.decode(reader, reader.uint32()); + break; + case 211: + message.resizeBilinear = $root.CoreML.Specification.ResizeBilinearLayerParams.decode(reader, reader.uint32()); + break; + case 212: + message.cropResize = $root.CoreML.Specification.CropResizeLayerParams.decode(reader, reader.uint32()); + break; + case 220: + message.unary = $root.CoreML.Specification.UnaryFunctionLayerParams.decode(reader, reader.uint32()); + break; + case 230: + message.add = $root.CoreML.Specification.AddLayerParams.decode(reader, reader.uint32()); + break; + case 231: + message.multiply = $root.CoreML.Specification.MultiplyLayerParams.decode(reader, reader.uint32()); + break; + case 240: + message.average = $root.CoreML.Specification.AverageLayerParams.decode(reader, reader.uint32()); + break; + case 245: + message.scale = $root.CoreML.Specification.ScaleLayerParams.decode(reader, reader.uint32()); + break; + case 250: + message.bias = $root.CoreML.Specification.BiasLayerParams.decode(reader, reader.uint32()); + break; + case 260: + message.max = $root.CoreML.Specification.MaxLayerParams.decode(reader, reader.uint32()); + break; + case 261: + message.min = $root.CoreML.Specification.MinLayerParams.decode(reader, reader.uint32()); + break; + case 270: + message.dot = $root.CoreML.Specification.DotProductLayerParams.decode(reader, reader.uint32()); + break; + case 280: + message.reduce = $root.CoreML.Specification.ReduceLayerParams.decode(reader, reader.uint32()); + break; + case 290: + 
message.loadConstant = $root.CoreML.Specification.LoadConstantLayerParams.decode(reader, reader.uint32()); + break; + case 300: + message.reshape = $root.CoreML.Specification.ReshapeLayerParams.decode(reader, reader.uint32()); + break; + case 301: + message.flatten = $root.CoreML.Specification.FlattenLayerParams.decode(reader, reader.uint32()); + break; + case 310: + message.permute = $root.CoreML.Specification.PermuteLayerParams.decode(reader, reader.uint32()); + break; + case 320: + message.concat = $root.CoreML.Specification.ConcatLayerParams.decode(reader, reader.uint32()); + break; + case 330: + message.split = $root.CoreML.Specification.SplitLayerParams.decode(reader, reader.uint32()); + break; + case 340: + message.sequenceRepeat = $root.CoreML.Specification.SequenceRepeatLayerParams.decode(reader, reader.uint32()); + break; + case 345: + message.reorganizeData = $root.CoreML.Specification.ReorganizeDataLayerParams.decode(reader, reader.uint32()); + break; + case 350: + message.slice = $root.CoreML.Specification.SliceLayerParams.decode(reader, reader.uint32()); + break; + case 400: + message.simpleRecurrent = $root.CoreML.Specification.SimpleRecurrentLayerParams.decode(reader, reader.uint32()); + break; + case 410: + message.gru = $root.CoreML.Specification.GRULayerParams.decode(reader, reader.uint32()); + break; + case 420: + message.uniDirectionalLSTM = $root.CoreML.Specification.UniDirectionalLSTMLayerParams.decode(reader, reader.uint32()); + break; + case 430: + message.biDirectionalLSTM = $root.CoreML.Specification.BiDirectionalLSTMLayerParams.decode(reader, reader.uint32()); + break; + case 500: + message.custom = $root.CoreML.Specification.CustomLayerParams.decode(reader, reader.uint32()); + break; + case 600: + message.copy = $root.CoreML.Specification.CopyLayerParams.decode(reader, reader.uint32()); + break; + case 605: + message.branch = $root.CoreML.Specification.BranchLayerParams.decode(reader, reader.uint32()); + break; + case 615: + 
message.loop = $root.CoreML.Specification.LoopLayerParams.decode(reader, reader.uint32()); + break; + case 620: + message.loopBreak = $root.CoreML.Specification.LoopBreakLayerParams.decode(reader, reader.uint32()); + break; + case 625: + message.loopContinue = $root.CoreML.Specification.LoopContinueLayerParams.decode(reader, reader.uint32()); + break; + case 635: + message.rangeStatic = $root.CoreML.Specification.RangeStaticLayerParams.decode(reader, reader.uint32()); + break; + case 640: + message.rangeDynamic = $root.CoreML.Specification.RangeDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 660: + message.clip = $root.CoreML.Specification.ClipLayerParams.decode(reader, reader.uint32()); + break; + case 665: + message.ceil = $root.CoreML.Specification.CeilLayerParams.decode(reader, reader.uint32()); + break; + case 670: + message.floor = $root.CoreML.Specification.FloorLayerParams.decode(reader, reader.uint32()); + break; + case 680: + message.sign = $root.CoreML.Specification.SignLayerParams.decode(reader, reader.uint32()); + break; + case 685: + message.round = $root.CoreML.Specification.RoundLayerParams.decode(reader, reader.uint32()); + break; + case 700: + message.exp2 = $root.CoreML.Specification.Exp2LayerParams.decode(reader, reader.uint32()); + break; + case 710: + message.sin = $root.CoreML.Specification.SinLayerParams.decode(reader, reader.uint32()); + break; + case 715: + message.cos = $root.CoreML.Specification.CosLayerParams.decode(reader, reader.uint32()); + break; + case 720: + message.tan = $root.CoreML.Specification.TanLayerParams.decode(reader, reader.uint32()); + break; + case 730: + message.asin = $root.CoreML.Specification.AsinLayerParams.decode(reader, reader.uint32()); + break; + case 735: + message.acos = $root.CoreML.Specification.AcosLayerParams.decode(reader, reader.uint32()); + break; + case 740: + message.atan = $root.CoreML.Specification.AtanLayerParams.decode(reader, reader.uint32()); + break; + case 750: + 
message.sinh = $root.CoreML.Specification.SinhLayerParams.decode(reader, reader.uint32()); + break; + case 755: + message.cosh = $root.CoreML.Specification.CoshLayerParams.decode(reader, reader.uint32()); + break; + case 760: + message.tanh = $root.CoreML.Specification.TanhLayerParams.decode(reader, reader.uint32()); + break; + case 770: + message.asinh = $root.CoreML.Specification.AsinhLayerParams.decode(reader, reader.uint32()); + break; + case 775: + message.acosh = $root.CoreML.Specification.AcoshLayerParams.decode(reader, reader.uint32()); + break; + case 780: + message.atanh = $root.CoreML.Specification.AtanhLayerParams.decode(reader, reader.uint32()); + break; + case 790: + message.erf = $root.CoreML.Specification.ErfLayerParams.decode(reader, reader.uint32()); + break; + case 795: + message.gelu = $root.CoreML.Specification.GeluLayerParams.decode(reader, reader.uint32()); + break; + case 815: + message.equal = $root.CoreML.Specification.EqualLayerParams.decode(reader, reader.uint32()); + break; + case 820: + message.notEqual = $root.CoreML.Specification.NotEqualLayerParams.decode(reader, reader.uint32()); + break; + case 825: + message.lessThan = $root.CoreML.Specification.LessThanLayerParams.decode(reader, reader.uint32()); + break; + case 827: + message.lessEqual = $root.CoreML.Specification.LessEqualLayerParams.decode(reader, reader.uint32()); + break; + case 830: + message.greaterThan = $root.CoreML.Specification.GreaterThanLayerParams.decode(reader, reader.uint32()); + break; + case 832: + message.greaterEqual = $root.CoreML.Specification.GreaterEqualLayerParams.decode(reader, reader.uint32()); + break; + case 840: + message.logicalOr = $root.CoreML.Specification.LogicalOrLayerParams.decode(reader, reader.uint32()); + break; + case 845: + message.logicalXor = $root.CoreML.Specification.LogicalXorLayerParams.decode(reader, reader.uint32()); + break; + case 850: + message.logicalNot = $root.CoreML.Specification.LogicalNotLayerParams.decode(reader, 
reader.uint32()); + break; + case 855: + message.logicalAnd = $root.CoreML.Specification.LogicalAndLayerParams.decode(reader, reader.uint32()); + break; + case 865: + message.modBroadcastable = $root.CoreML.Specification.ModBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 870: + message.minBroadcastable = $root.CoreML.Specification.MinBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 875: + message.maxBroadcastable = $root.CoreML.Specification.MaxBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 880: + message.addBroadcastable = $root.CoreML.Specification.AddBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 885: + message.powBroadcastable = $root.CoreML.Specification.PowBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 890: + message.divideBroadcastable = $root.CoreML.Specification.DivideBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 895: + message.floorDivBroadcastable = $root.CoreML.Specification.FloorDivBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 900: + message.multiplyBroadcastable = $root.CoreML.Specification.MultiplyBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 905: + message.subtractBroadcastable = $root.CoreML.Specification.SubtractBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 920: + message.tile = $root.CoreML.Specification.TileLayerParams.decode(reader, reader.uint32()); + break; + case 925: + message.stack = $root.CoreML.Specification.StackLayerParams.decode(reader, reader.uint32()); + break; + case 930: + message.gather = $root.CoreML.Specification.GatherLayerParams.decode(reader, reader.uint32()); + break; + case 935: + message.scatter = $root.CoreML.Specification.ScatterLayerParams.decode(reader, reader.uint32()); + break; + case 940: + message.gatherND = $root.CoreML.Specification.GatherNDLayerParams.decode(reader, 
reader.uint32()); + break; + case 945: + message.scatterND = $root.CoreML.Specification.ScatterNDLayerParams.decode(reader, reader.uint32()); + break; + case 950: + message.softmaxND = $root.CoreML.Specification.SoftmaxNDLayerParams.decode(reader, reader.uint32()); + break; + case 952: + message.gatherAlongAxis = $root.CoreML.Specification.GatherAlongAxisLayerParams.decode(reader, reader.uint32()); + break; + case 954: + message.scatterAlongAxis = $root.CoreML.Specification.ScatterAlongAxisLayerParams.decode(reader, reader.uint32()); + break; + case 960: + message.reverse = $root.CoreML.Specification.ReverseLayerParams.decode(reader, reader.uint32()); + break; + case 965: + message.reverseSeq = $root.CoreML.Specification.ReverseSeqLayerParams.decode(reader, reader.uint32()); + break; + case 975: + message.splitND = $root.CoreML.Specification.SplitNDLayerParams.decode(reader, reader.uint32()); + break; + case 980: + message.concatND = $root.CoreML.Specification.ConcatNDLayerParams.decode(reader, reader.uint32()); + break; + case 985: + message.transpose = $root.CoreML.Specification.TransposeLayerParams.decode(reader, reader.uint32()); + break; + case 995: + message.sliceStatic = $root.CoreML.Specification.SliceStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1000: + message.sliceDynamic = $root.CoreML.Specification.SliceDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1005: + message.slidingWindows = $root.CoreML.Specification.SlidingWindowsLayerParams.decode(reader, reader.uint32()); + break; + case 1015: + message.topK = $root.CoreML.Specification.TopKLayerParams.decode(reader, reader.uint32()); + break; + case 1020: + message.argMin = $root.CoreML.Specification.ArgMinLayerParams.decode(reader, reader.uint32()); + break; + case 1025: + message.argMax = $root.CoreML.Specification.ArgMaxLayerParams.decode(reader, reader.uint32()); + break; + case 1040: + message.embeddingND = 
$root.CoreML.Specification.EmbeddingNDLayerParams.decode(reader, reader.uint32()); + break; + case 1045: + message.batchedMatmul = $root.CoreML.Specification.BatchedMatMulLayerParams.decode(reader, reader.uint32()); + break; + case 1065: + message.getShape = $root.CoreML.Specification.GetShapeLayerParams.decode(reader, reader.uint32()); + break; + case 1070: + message.loadConstantND = $root.CoreML.Specification.LoadConstantNDLayerParams.decode(reader, reader.uint32()); + break; + case 1080: + message.fillLike = $root.CoreML.Specification.FillLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1085: + message.fillStatic = $root.CoreML.Specification.FillStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1090: + message.fillDynamic = $root.CoreML.Specification.FillDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1100: + message.broadcastToLike = $root.CoreML.Specification.BroadcastToLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1105: + message.broadcastToStatic = $root.CoreML.Specification.BroadcastToStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1110: + message.broadcastToDynamic = $root.CoreML.Specification.BroadcastToDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1120: + message.squeeze = $root.CoreML.Specification.SqueezeLayerParams.decode(reader, reader.uint32()); + break; + case 1125: + message.expandDims = $root.CoreML.Specification.ExpandDimsLayerParams.decode(reader, reader.uint32()); + break; + case 1130: + message.flattenTo2D = $root.CoreML.Specification.FlattenTo2DLayerParams.decode(reader, reader.uint32()); + break; + case 1135: + message.reshapeLike = $root.CoreML.Specification.ReshapeLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1140: + message.reshapeStatic = $root.CoreML.Specification.ReshapeStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1145: + message.reshapeDynamic = 
$root.CoreML.Specification.ReshapeDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1150: + message.rankPreservingReshape = $root.CoreML.Specification.RankPreservingReshapeLayerParams.decode(reader, reader.uint32()); + break; + case 1155: + message.constantPad = $root.CoreML.Specification.ConstantPaddingLayerParams.decode(reader, reader.uint32()); + break; + case 1170: + message.randomNormalLike = $root.CoreML.Specification.RandomNormalLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1175: + message.randomNormalStatic = $root.CoreML.Specification.RandomNormalStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1180: + message.randomNormalDynamic = $root.CoreML.Specification.RandomNormalDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1190: + message.randomUniformLike = $root.CoreML.Specification.RandomUniformLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1195: + message.randomUniformStatic = $root.CoreML.Specification.RandomUniformStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1200: + message.randomUniformDynamic = $root.CoreML.Specification.RandomUniformDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1210: + message.randomBernoulliLike = $root.CoreML.Specification.RandomBernoulliLikeLayerParams.decode(reader, reader.uint32()); + break; + case 1215: + message.randomBernoulliStatic = $root.CoreML.Specification.RandomBernoulliStaticLayerParams.decode(reader, reader.uint32()); + break; + case 1220: + message.randomBernoulliDynamic = $root.CoreML.Specification.RandomBernoulliDynamicLayerParams.decode(reader, reader.uint32()); + break; + case 1230: + message.categoricalDistribution = $root.CoreML.Specification.CategoricalDistributionLayerParams.decode(reader, reader.uint32()); + break; + case 1250: + message.reduceL1 = $root.CoreML.Specification.ReduceL1LayerParams.decode(reader, reader.uint32()); + break; + case 1255: + message.reduceL2 = 
$root.CoreML.Specification.ReduceL2LayerParams.decode(reader, reader.uint32()); + break; + case 1260: + message.reduceMax = $root.CoreML.Specification.ReduceMaxLayerParams.decode(reader, reader.uint32()); + break; + case 1265: + message.reduceMin = $root.CoreML.Specification.ReduceMinLayerParams.decode(reader, reader.uint32()); + break; + case 1270: + message.reduceSum = $root.CoreML.Specification.ReduceSumLayerParams.decode(reader, reader.uint32()); + break; + case 1275: + message.reduceProd = $root.CoreML.Specification.ReduceProdLayerParams.decode(reader, reader.uint32()); + break; + case 1280: + message.reduceMean = $root.CoreML.Specification.ReduceMeanLayerParams.decode(reader, reader.uint32()); + break; + case 1285: + message.reduceLogSum = $root.CoreML.Specification.ReduceLogSumLayerParams.decode(reader, reader.uint32()); + break; + case 1290: + message.reduceSumSquare = $root.CoreML.Specification.ReduceSumSquareLayerParams.decode(reader, reader.uint32()); + break; + case 1295: + message.reduceLogSumExp = $root.CoreML.Specification.ReduceLogSumExpLayerParams.decode(reader, reader.uint32()); + break; + case 1313: + message.whereNonZero = $root.CoreML.Specification.WhereNonZeroLayerParams.decode(reader, reader.uint32()); + break; + case 1315: + message.matrixBandPart = $root.CoreML.Specification.MatrixBandPartLayerParams.decode(reader, reader.uint32()); + break; + case 1320: + message.lowerTriangular = $root.CoreML.Specification.LowerTriangularLayerParams.decode(reader, reader.uint32()); + break; + case 1325: + message.upperTriangular = $root.CoreML.Specification.UpperTriangularLayerParams.decode(reader, reader.uint32()); + break; + case 1330: + message.whereBroadcastable = $root.CoreML.Specification.WhereBroadcastableLayerParams.decode(reader, reader.uint32()); + break; + case 1350: + message.layerNormalization = $root.CoreML.Specification.LayerNormalizationLayerParams.decode(reader, reader.uint32()); + break; + case 1400: + message.NonMaximumSuppression = 
$root.CoreML.Specification.NonMaximumSuppressionLayerParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NeuralNetworkLayer; + })(); + + Specification.BranchLayerParams = (function() { + + function BranchLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BranchLayerParams.prototype.ifBranch = null; + BranchLayerParams.prototype.elseBranch = null; + + BranchLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.BranchLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ifBranch = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + case 2: + message.elseBranch = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BranchLayerParams; + })(); + + Specification.LoopLayerParams = (function() { + + function LoopLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LoopLayerParams.prototype.maxLoopIterations = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + LoopLayerParams.prototype.conditionVar = ""; + LoopLayerParams.prototype.conditionNetwork = null; + LoopLayerParams.prototype.bodyNetwork = null; + + LoopLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LoopLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.maxLoopIterations = reader.uint64(); + break; + case 2: + message.conditionVar = reader.string(); + break; + case 3: + message.conditionNetwork = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + case 4: + message.bodyNetwork = $root.CoreML.Specification.NeuralNetwork.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LoopLayerParams; + })(); + + Specification.LoopBreakLayerParams = (function() { + + function LoopBreakLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LoopBreakLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LoopBreakLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LoopBreakLayerParams; + })(); + + Specification.LoopContinueLayerParams = (function() { + + function LoopContinueLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LoopContinueLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LoopContinueLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LoopContinueLayerParams; + })(); + + Specification.CopyLayerParams = (function() { + + function CopyLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CopyLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CopyLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CopyLayerParams; + })(); + + Specification.GreaterThanLayerParams = (function() { + + function GreaterThanLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GreaterThanLayerParams.prototype.alpha = 0; + + GreaterThanLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.GreaterThanLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return GreaterThanLayerParams; + })(); + + Specification.GreaterEqualLayerParams = (function() { + + function GreaterEqualLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GreaterEqualLayerParams.prototype.alpha = 0; + + GreaterEqualLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.GreaterEqualLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return GreaterEqualLayerParams; + })(); + + Specification.LessThanLayerParams = (function() { + + function LessThanLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LessThanLayerParams.prototype.alpha = 0; + + LessThanLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LessThanLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LessThanLayerParams; + })(); + + Specification.LessEqualLayerParams = (function() { + + function LessEqualLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LessEqualLayerParams.prototype.alpha = 0; + + LessEqualLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LessEqualLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LessEqualLayerParams; + })(); + + Specification.EqualLayerParams = (function() { + + function EqualLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EqualLayerParams.prototype.alpha = 0; + + EqualLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.EqualLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return EqualLayerParams; + })(); + + Specification.NotEqualLayerParams = (function() { + + function NotEqualLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NotEqualLayerParams.prototype.alpha = 0; + + NotEqualLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.NotEqualLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NotEqualLayerParams; + })(); + + Specification.LogicalAndLayerParams = (function() { + + function LogicalAndLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LogicalAndLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LogicalAndLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LogicalAndLayerParams; + })(); + + Specification.LogicalOrLayerParams = (function() { + + function LogicalOrLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LogicalOrLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LogicalOrLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LogicalOrLayerParams; + })(); + + Specification.LogicalXorLayerParams = (function() { + + function LogicalXorLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LogicalXorLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LogicalXorLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LogicalXorLayerParams; + })(); + + Specification.LogicalNotLayerParams = (function() { + + function LogicalNotLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LogicalNotLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LogicalNotLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LogicalNotLayerParams; + })(); + + Specification.BorderAmounts = (function() { + + function BorderAmounts(properties) { + this.borderAmounts = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BorderAmounts.prototype.borderAmounts = $util.emptyArray; + + BorderAmounts.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BorderAmounts(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + if (!(message.borderAmounts && message.borderAmounts.length)) + message.borderAmounts = []; + message.borderAmounts.push($root.CoreML.Specification.BorderAmounts.EdgeSizes.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BorderAmounts.EdgeSizes = (function() { + + function EdgeSizes(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EdgeSizes.prototype.startEdgeSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + EdgeSizes.prototype.endEdgeSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + EdgeSizes.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.BorderAmounts.EdgeSizes(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.startEdgeSize = reader.uint64(); + break; + case 2: + message.endEdgeSize = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return EdgeSizes; + })(); + + return BorderAmounts; + })(); + + Specification.ValidPadding = (function() { + + function ValidPadding(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ValidPadding.prototype.paddingAmounts = null; + + ValidPadding.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ValidPadding(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.paddingAmounts = $root.CoreML.Specification.BorderAmounts.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ValidPadding; + })(); + + Specification.SamePadding = (function() { + + function SamePadding(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SamePadding.prototype.asymmetryMode = 0; + + SamePadding.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SamePadding(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.asymmetryMode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SamePadding.SamePaddingMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "BOTTOM_RIGHT_HEAVY"] = 0; + values[valuesById[1] = "TOP_LEFT_HEAVY"] = 1; + return values; + })(); + + return SamePadding; + })(); + + Specification.SamplingMode = (function() { + + function SamplingMode(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SamplingMode.prototype.samplingMethod = 0; + + SamplingMode.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SamplingMode(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.samplingMethod = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SamplingMode.Method = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "STRICT_ALIGN_ENDPOINTS_MODE"] = 0; + values[valuesById[1] = "ALIGN_ENDPOINTS_MODE"] = 1; + values[valuesById[2] = "UPSAMPLE_MODE"] = 2; + values[valuesById[3] = "ROI_ALIGN_MODE"] = 3; + return values; + })(); + + return SamplingMode; + })(); + + Specification.BoxCoordinatesMode = (function() { + + function BoxCoordinatesMode(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BoxCoordinatesMode.prototype.boxMode = 0; + + BoxCoordinatesMode.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BoxCoordinatesMode(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.boxMode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BoxCoordinatesMode.Coordinates = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CORNERS_HEIGHT_FIRST"] = 0; + values[valuesById[1] = "CORNERS_WIDTH_FIRST"] = 1; + values[valuesById[2] = "CENTER_SIZE_HEIGHT_FIRST"] = 2; + values[valuesById[3] = "CENTER_SIZE_WIDTH_FIRST"] = 3; + return values; + })(); + + return BoxCoordinatesMode; + })(); + + Specification.WeightParams = (function() { + + function WeightParams(properties) { + this.floatValue = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + WeightParams.prototype.floatValue = $util.emptyArray; + WeightParams.prototype.float16Value = $util.newBuffer([]); + WeightParams.prototype.rawValue = $util.newBuffer([]); + WeightParams.prototype.quantization = null; + WeightParams.prototype.isUpdatable = false; + + WeightParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.WeightParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.floatValue && message.floatValue.length)) + message.floatValue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + if (message.floatValue.length == 0 && (end2 - reader.pos) > 1048576) { + var floatValueLength = end2 - reader.pos; + var floatValueView = new DataView(reader.buf.buffer, reader.buf.byteOffset + reader.pos, floatValueLength); + floatValueLength = floatValueLength >>> 2; + var floatValue = new Float32Array(floatValueLength); + for (var i = 0; i < floatValueLength; i++) { + floatValue[i] = floatValueView.getFloat32(i << 2, true); + } + message.floatValue = floatValue; + reader.pos = end2; + } + else { + while (reader.pos < end2) + message.floatValue.push(reader.float()); + } + } else + message.floatValue.push(reader.float()); + break; + case 2: + message.float16Value = reader.bytes(); + break; + case 30: + message.rawValue = reader.bytes(); + break; + case 40: + message.quantization = $root.CoreML.Specification.QuantizationParams.decode(reader, reader.uint32()); + break; + case 50: + message.isUpdatable = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return WeightParams; + })(); + + Specification.QuantizationParams = (function() { + + function QuantizationParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + QuantizationParams.prototype.numberOfBits = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + QuantizationParams.prototype.linearQuantization = null; + QuantizationParams.prototype.lookupTableQuantization = null; + + var $oneOfFields; + + Object.defineProperty(QuantizationParams.prototype, "QuantizationType", { + get: $util.oneOfGetter($oneOfFields = ["linearQuantization", "lookupTableQuantization"]), + set: $util.oneOfSetter($oneOfFields) + }); + + QuantizationParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.QuantizationParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numberOfBits = reader.uint64(); + break; + case 101: + message.linearQuantization = $root.CoreML.Specification.LinearQuantizationParams.decode(reader, reader.uint32()); + break; + case 102: + message.lookupTableQuantization = $root.CoreML.Specification.LookUpTableQuantizationParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return QuantizationParams; + })(); + + Specification.LinearQuantizationParams = (function() { + + function LinearQuantizationParams(properties) { + this.scale = []; + this.bias = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LinearQuantizationParams.prototype.scale = $util.emptyArray; + LinearQuantizationParams.prototype.bias = $util.emptyArray; + + LinearQuantizationParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LinearQuantizationParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.scale && message.scale.length)) + message.scale = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.scale.push(reader.float()); + } else + message.scale.push(reader.float()); + break; + case 2: + if (!(message.bias && message.bias.length)) + message.bias = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.bias.push(reader.float()); + } else + message.bias.push(reader.float()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LinearQuantizationParams; + })(); + + Specification.LookUpTableQuantizationParams = (function() { + + function LookUpTableQuantizationParams(properties) { + this.floatValue = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LookUpTableQuantizationParams.prototype.floatValue = $util.emptyArray; + + LookUpTableQuantizationParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LookUpTableQuantizationParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.floatValue && message.floatValue.length)) + message.floatValue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + if (message.floatValue.length == 0 && (end2 - reader.pos) > 1048576) { + var floatValueLength = end2 - reader.pos; + var floatValueView = new DataView(reader.buf.buffer, reader.buf.byteOffset + reader.pos, floatValueLength); + floatValueLength = floatValueLength >>> 2; + var floatValue = new Float32Array(floatValueLength); + for (var i = 0; i < floatValueLength; i++) { + floatValue[i] = floatValueView.getFloat32(i << 2, true); + } + message.floatValue = floatValue; + reader.pos = end2; + } + else { + while (reader.pos < end2) + message.floatValue.push(reader.float()); + } + } else + message.floatValue.push(reader.float()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LookUpTableQuantizationParams; + })(); + + Specification.ConvolutionLayerParams = (function() { + + function ConvolutionLayerParams(properties) { + this.kernelSize = []; + this.stride = []; + this.dilationFactor = []; + this.outputShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ConvolutionLayerParams.prototype.outputChannels = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + ConvolutionLayerParams.prototype.kernelChannels = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + ConvolutionLayerParams.prototype.nGroups = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + ConvolutionLayerParams.prototype.kernelSize = $util.emptyArray; + ConvolutionLayerParams.prototype.stride = $util.emptyArray; + ConvolutionLayerParams.prototype.dilationFactor = $util.emptyArray; + ConvolutionLayerParams.prototype.valid = null; + ConvolutionLayerParams.prototype.same = null; + ConvolutionLayerParams.prototype.isDeconvolution = false; + ConvolutionLayerParams.prototype.hasBias = false; + ConvolutionLayerParams.prototype.weights = null; + ConvolutionLayerParams.prototype.bias = null; + ConvolutionLayerParams.prototype.outputShape = $util.emptyArray; + + var $oneOfFields; + + Object.defineProperty(ConvolutionLayerParams.prototype, "ConvolutionPaddingType", { + get: $util.oneOfGetter($oneOfFields = ["valid", "same"]), + set: $util.oneOfSetter($oneOfFields) + }); + + ConvolutionLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ConvolutionLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.outputChannels = reader.uint64(); + break; + case 2: + message.kernelChannels = reader.uint64(); + break; + case 10: + message.nGroups = reader.uint64(); + break; + case 20: + if (!(message.kernelSize && message.kernelSize.length)) + message.kernelSize = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.kernelSize.push(reader.uint64()); + } else + message.kernelSize.push(reader.uint64()); + break; + case 30: + if (!(message.stride && message.stride.length)) + message.stride = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.stride.push(reader.uint64()); + } else + message.stride.push(reader.uint64()); + break; + case 40: + if (!(message.dilationFactor && 
message.dilationFactor.length)) + message.dilationFactor = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dilationFactor.push(reader.uint64()); + } else + message.dilationFactor.push(reader.uint64()); + break; + case 50: + message.valid = $root.CoreML.Specification.ValidPadding.decode(reader, reader.uint32()); + break; + case 51: + message.same = $root.CoreML.Specification.SamePadding.decode(reader, reader.uint32()); + break; + case 60: + message.isDeconvolution = reader.bool(); + break; + case 70: + message.hasBias = reader.bool(); + break; + case 90: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 91: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 100: + if (!(message.outputShape && message.outputShape.length)) + message.outputShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.outputShape.push(reader.uint64()); + } else + message.outputShape.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ConvolutionLayerParams; + })(); + + Specification.InnerProductLayerParams = (function() { + + function InnerProductLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + InnerProductLayerParams.prototype.inputChannels = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + InnerProductLayerParams.prototype.outputChannels = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + InnerProductLayerParams.prototype.hasBias = false; + InnerProductLayerParams.prototype.weights = null; + InnerProductLayerParams.prototype.bias = null; + + InnerProductLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.InnerProductLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputChannels = reader.uint64(); + break; + case 2: + message.outputChannels = reader.uint64(); + break; + case 10: + message.hasBias = reader.bool(); + break; + case 20: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return InnerProductLayerParams; + })(); + + Specification.EmbeddingLayerParams = (function() { + + function EmbeddingLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EmbeddingLayerParams.prototype.inputDim = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + EmbeddingLayerParams.prototype.outputChannels = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + EmbeddingLayerParams.prototype.hasBias = false; + EmbeddingLayerParams.prototype.weights = null; + EmbeddingLayerParams.prototype.bias = null; + + EmbeddingLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.EmbeddingLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputDim = reader.uint64(); + break; + case 2: + message.outputChannels = reader.uint64(); + break; + case 10: + message.hasBias = reader.bool(); + break; + case 20: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return EmbeddingLayerParams; + })(); + + Specification.EmbeddingNDLayerParams = (function() { + + function EmbeddingNDLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EmbeddingNDLayerParams.prototype.vocabSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + EmbeddingNDLayerParams.prototype.embeddingSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + EmbeddingNDLayerParams.prototype.hasBias = false; + EmbeddingNDLayerParams.prototype.weights = null; + EmbeddingNDLayerParams.prototype.bias = null; + + EmbeddingNDLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.EmbeddingNDLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.vocabSize = reader.uint64(); + break; + case 2: + message.embeddingSize = reader.uint64(); + break; + case 3: + message.hasBias = reader.bool(); + break; + case 20: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return EmbeddingNDLayerParams; + })(); + + Specification.BatchnormLayerParams = (function() { + + function BatchnormLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BatchnormLayerParams.prototype.channels = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + BatchnormLayerParams.prototype.computeMeanVar = false; + BatchnormLayerParams.prototype.instanceNormalization = false; + BatchnormLayerParams.prototype.epsilon = 0; + BatchnormLayerParams.prototype.gamma = null; + BatchnormLayerParams.prototype.beta = null; + BatchnormLayerParams.prototype.mean = null; + BatchnormLayerParams.prototype.variance = null; + + BatchnormLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BatchnormLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.channels = reader.uint64(); + break; + case 5: + message.computeMeanVar = reader.bool(); + break; + case 6: + message.instanceNormalization = reader.bool(); + break; + case 10: + message.epsilon = reader.float(); + break; + case 15: + message.gamma = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 16: + message.beta = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 17: + message.mean = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 18: + message.variance = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BatchnormLayerParams; + })(); + + Specification.PoolingLayerParams = (function() { + + function PoolingLayerParams(properties) { + this.kernelSize = []; + this.stride = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PoolingLayerParams.prototype.type = 0; + PoolingLayerParams.prototype.kernelSize = $util.emptyArray; + PoolingLayerParams.prototype.stride = $util.emptyArray; + PoolingLayerParams.prototype.valid = null; + PoolingLayerParams.prototype.same = null; + PoolingLayerParams.prototype.includeLastPixel = null; + PoolingLayerParams.prototype.avgPoolExcludePadding = false; + PoolingLayerParams.prototype.globalPooling = false; + + var $oneOfFields; + + Object.defineProperty(PoolingLayerParams.prototype, "PoolingPaddingType", { + get: $util.oneOfGetter($oneOfFields = ["valid", "same", "includeLastPixel"]), + set: $util.oneOfSetter($oneOfFields) + }); + + PoolingLayerParams.decode = function 
decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.PoolingLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 10: + if (!(message.kernelSize && message.kernelSize.length)) + message.kernelSize = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.kernelSize.push(reader.uint64()); + } else + message.kernelSize.push(reader.uint64()); + break; + case 20: + if (!(message.stride && message.stride.length)) + message.stride = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.stride.push(reader.uint64()); + } else + message.stride.push(reader.uint64()); + break; + case 30: + message.valid = $root.CoreML.Specification.ValidPadding.decode(reader, reader.uint32()); + break; + case 31: + message.same = $root.CoreML.Specification.SamePadding.decode(reader, reader.uint32()); + break; + case 32: + message.includeLastPixel = $root.CoreML.Specification.PoolingLayerParams.ValidCompletePadding.decode(reader, reader.uint32()); + break; + case 50: + message.avgPoolExcludePadding = reader.bool(); + break; + case 60: + message.globalPooling = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PoolingLayerParams.PoolingType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "MAX"] = 0; + values[valuesById[1] = "AVERAGE"] = 1; + values[valuesById[2] = "L2"] = 2; + return values; + })(); + + PoolingLayerParams.ValidCompletePadding = (function() { + + function ValidCompletePadding(properties) { + this.paddingAmounts = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if 
(properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ValidCompletePadding.prototype.paddingAmounts = $util.emptyArray; + + ValidCompletePadding.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.PoolingLayerParams.ValidCompletePadding(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + if (!(message.paddingAmounts && message.paddingAmounts.length)) + message.paddingAmounts = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.paddingAmounts.push(reader.uint64()); + } else + message.paddingAmounts.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ValidCompletePadding; + })(); + + return PoolingLayerParams; + })(); + + Specification.PaddingLayerParams = (function() { + + function PaddingLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PaddingLayerParams.prototype.constant = null; + PaddingLayerParams.prototype.reflection = null; + PaddingLayerParams.prototype.replication = null; + PaddingLayerParams.prototype.paddingAmounts = null; + + var $oneOfFields; + + Object.defineProperty(PaddingLayerParams.prototype, "PaddingType", { + get: $util.oneOfGetter($oneOfFields = ["constant", "reflection", "replication"]), + set: $util.oneOfSetter($oneOfFields) + }); + + PaddingLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.PaddingLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.constant = $root.CoreML.Specification.PaddingLayerParams.PaddingConstant.decode(reader, reader.uint32()); + break; + case 2: + message.reflection = $root.CoreML.Specification.PaddingLayerParams.PaddingReflection.decode(reader, reader.uint32()); + break; + case 3: + message.replication = $root.CoreML.Specification.PaddingLayerParams.PaddingReplication.decode(reader, reader.uint32()); + break; + case 10: + message.paddingAmounts = $root.CoreML.Specification.BorderAmounts.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PaddingLayerParams.PaddingConstant = (function() { + + function PaddingConstant(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PaddingConstant.prototype.value = 0; + + PaddingConstant.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.PaddingLayerParams.PaddingConstant(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PaddingConstant; + })(); + + PaddingLayerParams.PaddingReflection = (function() { + + function PaddingReflection(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PaddingReflection.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.PaddingLayerParams.PaddingReflection(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PaddingReflection; + })(); + + PaddingLayerParams.PaddingReplication = (function() { + + function PaddingReplication(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PaddingReplication.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.PaddingLayerParams.PaddingReplication(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PaddingReplication; + })(); + + return PaddingLayerParams; + })(); + + Specification.ConcatLayerParams = (function() { + + function ConcatLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ConcatLayerParams.prototype.sequenceConcat = false; + + ConcatLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ConcatLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 100: + message.sequenceConcat = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ConcatLayerParams; + })(); + + Specification.LRNLayerParams = (function() { + + function LRNLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LRNLayerParams.prototype.alpha = 0; + LRNLayerParams.prototype.beta = 0; + LRNLayerParams.prototype.localSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + LRNLayerParams.prototype.k = 0; + + LRNLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LRNLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + case 2: + message.beta = reader.float(); + break; + case 3: + message.localSize = reader.uint64(); + break; + case 4: + message.k = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LRNLayerParams; + })(); + + Specification.SoftmaxLayerParams = (function() { + + function SoftmaxLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SoftmaxLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SoftmaxLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SoftmaxLayerParams; + })(); + + Specification.SplitLayerParams = (function() { + + function SplitLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SplitLayerParams.prototype.nOutputs = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + SplitLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SplitLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nOutputs = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SplitLayerParams; + })(); + + Specification.AddLayerParams = (function() { + + function AddLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AddLayerParams.prototype.alpha = 0; + + AddLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.AddLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AddLayerParams; + })(); + + Specification.MultiplyLayerParams = (function() { + + function MultiplyLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MultiplyLayerParams.prototype.alpha = 0; + + MultiplyLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.MultiplyLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.alpha = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MultiplyLayerParams; + })(); + + Specification.UnaryFunctionLayerParams = (function() { + + function UnaryFunctionLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + UnaryFunctionLayerParams.prototype.type = 0; + UnaryFunctionLayerParams.prototype.alpha = 0; + UnaryFunctionLayerParams.prototype.epsilon = 0; + UnaryFunctionLayerParams.prototype.shift = 0; + UnaryFunctionLayerParams.prototype.scale = 0; + + UnaryFunctionLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.UnaryFunctionLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 2: + message.alpha = reader.float(); + break; + case 3: + message.epsilon = reader.float(); + break; + case 4: + message.shift = reader.float(); + break; + case 5: + message.scale = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + UnaryFunctionLayerParams.Operation = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "SQRT"] = 0; + values[valuesById[1] = "RSQRT"] = 1; + values[valuesById[2] = "INVERSE"] = 2; + values[valuesById[3] = "POWER"] = 3; + values[valuesById[4] = "EXP"] = 4; + values[valuesById[5] = "LOG"] = 5; + values[valuesById[6] = "ABS"] = 6; + values[valuesById[7] = "THRESHOLD"] = 7; + return values; + })(); + + return UnaryFunctionLayerParams; + })(); + + Specification.UpsampleLayerParams = (function() { + + function UpsampleLayerParams(properties) { + this.scalingFactor = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + UpsampleLayerParams.prototype.scalingFactor = $util.emptyArray; + UpsampleLayerParams.prototype.mode = 0; + + UpsampleLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.UpsampleLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.scalingFactor && message.scalingFactor.length)) + message.scalingFactor = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.scalingFactor.push(reader.uint64()); + } else + message.scalingFactor.push(reader.uint64()); + break; + case 5: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + UpsampleLayerParams.InterpolationMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NN"] = 0; + values[valuesById[1] = "BILINEAR"] = 1; + return values; + })(); + + return UpsampleLayerParams; + })(); + + Specification.ResizeBilinearLayerParams = (function() { + + function ResizeBilinearLayerParams(properties) { + this.targetSize = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ResizeBilinearLayerParams.prototype.targetSize = $util.emptyArray; + ResizeBilinearLayerParams.prototype.mode = null; + + ResizeBilinearLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ResizeBilinearLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.targetSize && message.targetSize.length)) + message.targetSize = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.targetSize.push(reader.uint64()); + } else + message.targetSize.push(reader.uint64()); + break; + case 2: + message.mode = $root.CoreML.Specification.SamplingMode.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ResizeBilinearLayerParams; + })(); + + Specification.CropResizeLayerParams = (function() { + + function CropResizeLayerParams(properties) { + this.targetSize = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CropResizeLayerParams.prototype.targetSize = $util.emptyArray; + CropResizeLayerParams.prototype.normalizedCoordinates = false; + CropResizeLayerParams.prototype.mode = null; + CropResizeLayerParams.prototype.boxIndicesMode = null; + CropResizeLayerParams.prototype.spatialScale = 0; + + CropResizeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CropResizeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.targetSize && message.targetSize.length)) + message.targetSize = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.targetSize.push(reader.uint64()); + } else + message.targetSize.push(reader.uint64()); + break; + case 2: + message.normalizedCoordinates = reader.bool(); + break; + case 3: + message.mode = $root.CoreML.Specification.SamplingMode.decode(reader, reader.uint32()); + break; + case 4: + message.boxIndicesMode = $root.CoreML.Specification.BoxCoordinatesMode.decode(reader, reader.uint32()); + break; + case 5: + message.spatialScale = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CropResizeLayerParams; + })(); + + Specification.BiasLayerParams = (function() { + + function BiasLayerParams(properties) { + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BiasLayerParams.prototype.shape = $util.emptyArray; + BiasLayerParams.prototype.bias = null; + + BiasLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BiasLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shape && message.shape.length)) + message.shape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shape.push(reader.uint64()); + } else + message.shape.push(reader.uint64()); + break; + case 2: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BiasLayerParams; + })(); + + Specification.ScaleLayerParams = (function() { + + function ScaleLayerParams(properties) { + this.shapeScale = []; + this.shapeBias = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ScaleLayerParams.prototype.shapeScale = $util.emptyArray; + ScaleLayerParams.prototype.scale = null; + ScaleLayerParams.prototype.hasBias = false; + ScaleLayerParams.prototype.shapeBias = $util.emptyArray; + ScaleLayerParams.prototype.bias = null; + + ScaleLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ScaleLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shapeScale && message.shapeScale.length)) + message.shapeScale = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shapeScale.push(reader.uint64()); + } else + message.shapeScale.push(reader.uint64()); + break; + case 2: + message.scale = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 3: + message.hasBias = reader.bool(); + break; + case 4: + if (!(message.shapeBias && message.shapeBias.length)) + message.shapeBias = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shapeBias.push(reader.uint64()); + } else + message.shapeBias.push(reader.uint64()); + break; + case 5: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ScaleLayerParams; + })(); + + Specification.LoadConstantLayerParams = (function() { + + function LoadConstantLayerParams(properties) { + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LoadConstantLayerParams.prototype.shape = $util.emptyArray; + LoadConstantLayerParams.prototype.data = null; + + LoadConstantLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LoadConstantLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shape && message.shape.length)) + message.shape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shape.push(reader.uint64()); + } else + message.shape.push(reader.uint64()); + break; + case 2: + message.data = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LoadConstantLayerParams; + })(); + + Specification.L2NormalizeLayerParams = (function() { + + function L2NormalizeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + L2NormalizeLayerParams.prototype.epsilon = 0; + + L2NormalizeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.L2NormalizeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.epsilon = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return L2NormalizeLayerParams; + })(); + + Specification.FlattenLayerParams = (function() { + + function FlattenLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FlattenLayerParams.prototype.mode = 0; + + FlattenLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FlattenLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FlattenLayerParams.FlattenOrder = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CHANNEL_FIRST"] = 0; + values[valuesById[1] = "CHANNEL_LAST"] = 1; + return values; + })(); + + return FlattenLayerParams; + })(); + + Specification.ReshapeLayerParams = (function() { + + function ReshapeLayerParams(properties) { + this.targetShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReshapeLayerParams.prototype.targetShape = $util.emptyArray; + ReshapeLayerParams.prototype.mode = 0; + + ReshapeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReshapeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.targetShape && message.targetShape.length)) + message.targetShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.targetShape.push(reader.int64()); + } else + message.targetShape.push(reader.int64()); + break; + case 2: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ReshapeLayerParams.ReshapeOrder = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CHANNEL_FIRST"] = 0; + values[valuesById[1] = "CHANNEL_LAST"] = 1; + return values; + })(); + + return ReshapeLayerParams; + })(); + + Specification.PermuteLayerParams = (function() { + + function PermuteLayerParams(properties) { + this.axis = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PermuteLayerParams.prototype.axis = $util.emptyArray; + + PermuteLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.PermuteLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axis && message.axis.length)) + message.axis = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axis.push(reader.uint64()); + } else + message.axis.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PermuteLayerParams; + })(); + + Specification.ReorganizeDataLayerParams = (function() { + + function ReorganizeDataLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReorganizeDataLayerParams.prototype.mode = 0; + ReorganizeDataLayerParams.prototype.blockSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + ReorganizeDataLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReorganizeDataLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + case 2: + message.blockSize = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ReorganizeDataLayerParams.ReorganizationType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "SPACE_TO_DEPTH"] = 0; + values[valuesById[1] = "DEPTH_TO_SPACE"] = 1; + return values; + })(); + + return ReorganizeDataLayerParams; + })(); + + Specification.SliceLayerParams = (function() { + + function SliceLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SliceLayerParams.prototype.startIndex = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + SliceLayerParams.prototype.endIndex = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + SliceLayerParams.prototype.stride = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + SliceLayerParams.prototype.axis = 0; + + SliceLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SliceLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.startIndex = reader.int64(); + break; + case 2: + message.endIndex = reader.int64(); + break; + case 3: + message.stride = reader.uint64(); + break; + case 4: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SliceLayerParams.SliceAxis = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CHANNEL_AXIS"] = 0; + values[valuesById[1] = "HEIGHT_AXIS"] = 1; + values[valuesById[2] = "WIDTH_AXIS"] = 2; + return values; + })(); + + return SliceLayerParams; + })(); + + Specification.ReduceLayerParams = (function() { + + function ReduceLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceLayerParams.prototype.mode = 0; + ReduceLayerParams.prototype.epsilon = 0; + ReduceLayerParams.prototype.axis = 0; + + ReduceLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + case 2: + message.epsilon = reader.float(); + break; + case 3: + message.axis = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ReduceLayerParams.ReduceOperation = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "SUM"] = 0; + values[valuesById[1] = "AVG"] = 1; + values[valuesById[2] = "PROD"] = 2; + values[valuesById[3] = "LOGSUM"] = 3; + values[valuesById[4] = "SUMSQUARE"] = 4; + values[valuesById[5] = "L1"] = 5; + values[valuesById[6] = "L2"] = 6; + values[valuesById[7] = "MAX"] = 7; + values[valuesById[8] = "MIN"] = 8; + values[valuesById[9] = "ARGMAX"] = 9; + return values; + })(); + + ReduceLayerParams.ReduceAxis = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CHW"] = 0; + values[valuesById[1] = "HW"] = 1; + values[valuesById[2] = "C"] = 2; + values[valuesById[3] = "H"] = 3; + values[valuesById[4] = "W"] = 4; + return values; + })(); + + return ReduceLayerParams; + })(); + + Specification.CropLayerParams = (function() { + + function CropLayerParams(properties) { + this.offset = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CropLayerParams.prototype.cropAmounts = null; + CropLayerParams.prototype.offset = $util.emptyArray; + + CropLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CropLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cropAmounts = $root.CoreML.Specification.BorderAmounts.decode(reader, reader.uint32()); + break; + case 5: + if (!(message.offset && message.offset.length)) + message.offset = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.offset.push(reader.uint64()); + } else + message.offset.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CropLayerParams; + })(); + + Specification.AverageLayerParams = (function() { + + function AverageLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AverageLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.AverageLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AverageLayerParams; + })(); + + Specification.MaxLayerParams = (function() { + + function MaxLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MaxLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.MaxLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MaxLayerParams; + })(); + + Specification.MinLayerParams = (function() { + + function MinLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MinLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.MinLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MinLayerParams; + })(); + + Specification.DotProductLayerParams = (function() { + + function DotProductLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DotProductLayerParams.prototype.cosineSimilarity = false; + + DotProductLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.DotProductLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.cosineSimilarity = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DotProductLayerParams; + })(); + + Specification.MeanVarianceNormalizeLayerParams = (function() { + + function MeanVarianceNormalizeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MeanVarianceNormalizeLayerParams.prototype.acrossChannels = false; + MeanVarianceNormalizeLayerParams.prototype.normalizeVariance = false; + MeanVarianceNormalizeLayerParams.prototype.epsilon = 0; + + MeanVarianceNormalizeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.MeanVarianceNormalizeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.acrossChannels = reader.bool(); + break; + case 2: + message.normalizeVariance = reader.bool(); + break; + case 3: + message.epsilon = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MeanVarianceNormalizeLayerParams; + })(); + + Specification.SequenceRepeatLayerParams = (function() { + + function SequenceRepeatLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SequenceRepeatLayerParams.prototype.nRepetitions = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + + SequenceRepeatLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SequenceRepeatLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.nRepetitions = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SequenceRepeatLayerParams; + })(); + + Specification.SimpleRecurrentLayerParams = (function() { + + function SimpleRecurrentLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SimpleRecurrentLayerParams.prototype.inputVectorSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + SimpleRecurrentLayerParams.prototype.outputVectorSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + SimpleRecurrentLayerParams.prototype.activation = null; + SimpleRecurrentLayerParams.prototype.sequenceOutput = false; + SimpleRecurrentLayerParams.prototype.hasBiasVector = false; + SimpleRecurrentLayerParams.prototype.weightMatrix = null; + SimpleRecurrentLayerParams.prototype.recursionMatrix = null; + SimpleRecurrentLayerParams.prototype.biasVector = null; + SimpleRecurrentLayerParams.prototype.reverseInput = false; + + SimpleRecurrentLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SimpleRecurrentLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + message.activation = $root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32()); + break; + case 15: + message.sequenceOutput = reader.bool(); + break; + case 20: + message.hasBiasVector = reader.bool(); + break; + case 30: + message.weightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 31: + message.recursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 32: + message.biasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 100: + message.reverseInput = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SimpleRecurrentLayerParams; + })(); + + Specification.GRULayerParams = (function() { + + function GRULayerParams(properties) { + this.activations = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GRULayerParams.prototype.inputVectorSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + GRULayerParams.prototype.outputVectorSize = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + GRULayerParams.prototype.activations = $util.emptyArray; + GRULayerParams.prototype.sequenceOutput = false; + GRULayerParams.prototype.hasBiasVectors = false; + GRULayerParams.prototype.updateGateWeightMatrix = null; + GRULayerParams.prototype.resetGateWeightMatrix = null; + GRULayerParams.prototype.outputGateWeightMatrix = null; + GRULayerParams.prototype.updateGateRecursionMatrix = null; + GRULayerParams.prototype.resetGateRecursionMatrix = null; + GRULayerParams.prototype.outputGateRecursionMatrix = null; + GRULayerParams.prototype.updateGateBiasVector = null; + GRULayerParams.prototype.resetGateBiasVector = null; + GRULayerParams.prototype.outputGateBiasVector = null; + GRULayerParams.prototype.reverseInput = false; + + GRULayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.GRULayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + if (!(message.activations && message.activations.length)) + message.activations = []; + message.activations.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 15: + message.sequenceOutput = reader.bool(); + break; + case 20: + message.hasBiasVectors = reader.bool(); + break; + case 30: + message.updateGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 31: + message.resetGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 32: + message.outputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 50: + 
message.updateGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 51: + message.resetGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 52: + message.outputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 70: + message.updateGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 71: + message.resetGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 72: + message.outputGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 100: + message.reverseInput = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return GRULayerParams; + })(); + + Specification.LSTMParams = (function() { + + function LSTMParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LSTMParams.prototype.sequenceOutput = false; + LSTMParams.prototype.hasBiasVectors = false; + LSTMParams.prototype.forgetBias = false; + LSTMParams.prototype.hasPeepholeVectors = false; + LSTMParams.prototype.coupledInputAndForgetGate = false; + LSTMParams.prototype.cellClipThreshold = 0; + + LSTMParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LSTMParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.sequenceOutput = reader.bool(); + break; + case 20: + message.hasBiasVectors = reader.bool(); + break; + case 30: + message.forgetBias = reader.bool(); + break; + case 40: + message.hasPeepholeVectors = reader.bool(); + break; + case 50: + message.coupledInputAndForgetGate = reader.bool(); + break; + case 60: + message.cellClipThreshold = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LSTMParams; + })(); + + Specification.LSTMWeightParams = (function() { + + function LSTMWeightParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LSTMWeightParams.prototype.inputGateWeightMatrix = null; + LSTMWeightParams.prototype.forgetGateWeightMatrix = null; + LSTMWeightParams.prototype.blockInputWeightMatrix = null; + LSTMWeightParams.prototype.outputGateWeightMatrix = null; + LSTMWeightParams.prototype.inputGateRecursionMatrix = null; + LSTMWeightParams.prototype.forgetGateRecursionMatrix = null; + LSTMWeightParams.prototype.blockInputRecursionMatrix = null; + LSTMWeightParams.prototype.outputGateRecursionMatrix = null; + LSTMWeightParams.prototype.inputGateBiasVector = null; + LSTMWeightParams.prototype.forgetGateBiasVector = null; + LSTMWeightParams.prototype.blockInputBiasVector = null; + LSTMWeightParams.prototype.outputGateBiasVector = null; + LSTMWeightParams.prototype.inputGatePeepholeVector = null; + LSTMWeightParams.prototype.forgetGatePeepholeVector = null; + LSTMWeightParams.prototype.outputGatePeepholeVector = null; + + LSTMWeightParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === 
undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LSTMWeightParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 2: + message.forgetGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 3: + message.blockInputWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 4: + message.outputGateWeightMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 20: + message.inputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 21: + message.forgetGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 22: + message.blockInputRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 23: + message.outputGateRecursionMatrix = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 40: + message.inputGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 41: + message.forgetGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 42: + message.blockInputBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 43: + message.outputGateBiasVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 60: + message.inputGatePeepholeVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 61: + message.forgetGatePeepholeVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 62: + 
message.outputGatePeepholeVector = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LSTMWeightParams; + })(); + + Specification.UniDirectionalLSTMLayerParams = (function() { + + function UniDirectionalLSTMLayerParams(properties) { + this.activations = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + UniDirectionalLSTMLayerParams.prototype.inputVectorSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + UniDirectionalLSTMLayerParams.prototype.outputVectorSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + UniDirectionalLSTMLayerParams.prototype.activations = $util.emptyArray; + UniDirectionalLSTMLayerParams.prototype.params = null; + UniDirectionalLSTMLayerParams.prototype.weightParams = null; + UniDirectionalLSTMLayerParams.prototype.reverseInput = false; + + UniDirectionalLSTMLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.UniDirectionalLSTMLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + if (!(message.activations && message.activations.length)) + message.activations = []; + message.activations.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 15: + message.params = $root.CoreML.Specification.LSTMParams.decode(reader, reader.uint32()); + break; + case 20: + message.weightParams = $root.CoreML.Specification.LSTMWeightParams.decode(reader, reader.uint32()); + break; + case 100: + message.reverseInput = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return UniDirectionalLSTMLayerParams; + })(); + + Specification.BiDirectionalLSTMLayerParams = (function() { + + function BiDirectionalLSTMLayerParams(properties) { + this.activationsForwardLSTM = []; + this.activationsBackwardLSTM = []; + this.weightParams = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BiDirectionalLSTMLayerParams.prototype.inputVectorSize = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + BiDirectionalLSTMLayerParams.prototype.outputVectorSize = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + BiDirectionalLSTMLayerParams.prototype.activationsForwardLSTM = $util.emptyArray; + BiDirectionalLSTMLayerParams.prototype.activationsBackwardLSTM = $util.emptyArray; + BiDirectionalLSTMLayerParams.prototype.params = null; + BiDirectionalLSTMLayerParams.prototype.weightParams = $util.emptyArray; + + BiDirectionalLSTMLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.BiDirectionalLSTMLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.inputVectorSize = reader.uint64(); + break; + case 2: + message.outputVectorSize = reader.uint64(); + break; + case 10: + if (!(message.activationsForwardLSTM && message.activationsForwardLSTM.length)) + message.activationsForwardLSTM = []; + message.activationsForwardLSTM.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 11: + if (!(message.activationsBackwardLSTM && message.activationsBackwardLSTM.length)) + message.activationsBackwardLSTM = []; + message.activationsBackwardLSTM.push($root.CoreML.Specification.ActivationParams.decode(reader, reader.uint32())); + break; + case 15: + message.params = $root.CoreML.Specification.LSTMParams.decode(reader, reader.uint32()); + break; + case 20: + if (!(message.weightParams && message.weightParams.length)) + message.weightParams = []; + message.weightParams.push($root.CoreML.Specification.LSTMWeightParams.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BiDirectionalLSTMLayerParams; + })(); + + Specification.CustomLayerParams = (function() { + + function CustomLayerParams(properties) { + this.weights = []; + this.parameters = {}; + if (properties) + for (var keys = 
Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CustomLayerParams.prototype.className = ""; + CustomLayerParams.prototype.weights = $util.emptyArray; + CustomLayerParams.prototype.parameters = $util.emptyObject; + CustomLayerParams.prototype.description = ""; + + CustomLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CustomLayerParams(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.className = reader.string(); + break; + case 20: + if (!(message.weights && message.weights.length)) + message.weights = []; + message.weights.push($root.CoreML.Specification.WeightParams.decode(reader, reader.uint32())); + break; + case 30: + reader.skip().pos++; + if (message.parameters === $util.emptyObject) + message.parameters = {}; + key = reader.string(); + reader.pos++; + message.parameters[key] = $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue.decode(reader, reader.uint32()); + break; + case 40: + message.description = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + CustomLayerParams.CustomLayerParamValue = (function() { + + function CustomLayerParamValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CustomLayerParamValue.prototype.doubleValue = 0; + CustomLayerParamValue.prototype.stringValue = ""; + CustomLayerParamValue.prototype.intValue = 0; + CustomLayerParamValue.prototype.longValue = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + CustomLayerParamValue.prototype.boolValue = false; + + var $oneOfFields; + + Object.defineProperty(CustomLayerParamValue.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["doubleValue", "stringValue", "intValue", "longValue", "boolValue"]), + set: $util.oneOfSetter($oneOfFields) + }); + + CustomLayerParamValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CustomLayerParams.CustomLayerParamValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.doubleValue = reader.double(); + break; + case 20: + message.stringValue = reader.string(); + break; + case 30: + message.intValue = reader.int32(); + break; + case 40: + message.longValue = reader.int64(); + break; + case 50: + message.boolValue = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CustomLayerParamValue; + })(); + + return CustomLayerParams; + })(); + + Specification.TransposeLayerParams = (function() { + + function TransposeLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TransposeLayerParams.prototype.axes = $util.emptyArray; + + TransposeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TransposeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.uint64()); + } else + message.axes.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TransposeLayerParams; + })(); + + Specification.BatchedMatMulLayerParams = (function() { + + function BatchedMatMulLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BatchedMatMulLayerParams.prototype.transposeA = false; + BatchedMatMulLayerParams.prototype.transposeB = false; + BatchedMatMulLayerParams.prototype.weightMatrixFirstDimension = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + BatchedMatMulLayerParams.prototype.weightMatrixSecondDimension = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + BatchedMatMulLayerParams.prototype.hasBias = false; + BatchedMatMulLayerParams.prototype.weights = null; + BatchedMatMulLayerParams.prototype.bias = null; + + BatchedMatMulLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BatchedMatMulLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.transposeA = reader.bool(); + break; + case 2: + message.transposeB = reader.bool(); + break; + case 5: + message.weightMatrixFirstDimension = reader.uint64(); + break; + case 6: + message.weightMatrixSecondDimension = reader.uint64(); + break; + case 7: + message.hasBias = reader.bool(); + break; + case 8: + message.weights = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 9: + message.bias = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BatchedMatMulLayerParams; + })(); + + Specification.ConcatNDLayerParams = (function() { + + function ConcatNDLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ConcatNDLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + ConcatNDLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ConcatNDLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ConcatNDLayerParams; + })(); + + Specification.SoftmaxNDLayerParams = (function() { + + function SoftmaxNDLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SoftmaxNDLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + SoftmaxNDLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SoftmaxNDLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SoftmaxNDLayerParams; + })(); + + Specification.ReverseLayerParams = (function() { + + function ReverseLayerParams(properties) { + this.reverseDim = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReverseLayerParams.prototype.reverseDim = $util.emptyArray; + + ReverseLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReverseLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.reverseDim && message.reverseDim.length)) + message.reverseDim = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.reverseDim.push(reader.bool()); + } else + message.reverseDim.push(reader.bool()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReverseLayerParams; + })(); + + Specification.ReverseSeqLayerParams = (function() { + + function ReverseSeqLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReverseSeqLayerParams.prototype.batchAxis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ReverseSeqLayerParams.prototype.sequenceAxis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + ReverseSeqLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReverseSeqLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.batchAxis = reader.int64(); + break; + case 2: + message.sequenceAxis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReverseSeqLayerParams; + })(); + + Specification.LoadConstantNDLayerParams = (function() { + + function LoadConstantNDLayerParams(properties) { + this.shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LoadConstantNDLayerParams.prototype.shape = $util.emptyArray; + LoadConstantNDLayerParams.prototype.data = null; + + LoadConstantNDLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LoadConstantNDLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shape && message.shape.length)) + message.shape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shape.push(reader.uint64()); + } else + message.shape.push(reader.uint64()); + break; + case 2: + message.data = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LoadConstantNDLayerParams; + })(); + + Specification.FillLikeLayerParams = (function() { + + function FillLikeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FillLikeLayerParams.prototype.value = 0; + + FillLikeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.FillLikeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FillLikeLayerParams; + })(); + + Specification.FillStaticLayerParams = (function() { + + function FillStaticLayerParams(properties) { + this.targetShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FillStaticLayerParams.prototype.value = 0; + FillStaticLayerParams.prototype.targetShape = $util.emptyArray; + + FillStaticLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FillStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + case 2: + if (!(message.targetShape && message.targetShape.length)) + message.targetShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.targetShape.push(reader.uint64()); + } else + message.targetShape.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FillStaticLayerParams; + })(); + + Specification.FillDynamicLayerParams = (function() { + + function FillDynamicLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FillDynamicLayerParams.prototype.value = 0; + + FillDynamicLayerParams.decode = function decode(reader, length) { + if 
(!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FillDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FillDynamicLayerParams; + })(); + + Specification.WhereBroadcastableLayerParams = (function() { + + function WhereBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + WhereBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.WhereBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return WhereBroadcastableLayerParams; + })(); + + Specification.SinLayerParams = (function() { + + function SinLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SinLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SinLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SinLayerParams; + })(); + + Specification.CosLayerParams = (function() { + + function CosLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CosLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CosLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CosLayerParams; + })(); + + Specification.TanLayerParams = (function() { + + function TanLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TanLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TanLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TanLayerParams; + })(); + + Specification.AsinLayerParams = (function() { + + function AsinLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AsinLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.AsinLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AsinLayerParams; + })(); + + Specification.AcosLayerParams = (function() { + + function AcosLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AcosLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.AcosLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AcosLayerParams; + })(); + + Specification.AtanLayerParams = (function() { + + function AtanLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AtanLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.AtanLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AtanLayerParams; + })(); + + Specification.SinhLayerParams = (function() { + + function SinhLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SinhLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SinhLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SinhLayerParams; + })(); + + Specification.CoshLayerParams = (function() { + + function CoshLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CoshLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CoshLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CoshLayerParams; + })(); + + Specification.TanhLayerParams = (function() { + + function TanhLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TanhLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TanhLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TanhLayerParams; + })(); + + Specification.AsinhLayerParams = (function() { + + function AsinhLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AsinhLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.AsinhLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AsinhLayerParams; + })(); + + Specification.AcoshLayerParams = (function() { + + function AcoshLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AcoshLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.AcoshLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AcoshLayerParams; + })(); + + Specification.AtanhLayerParams = (function() { + + function AtanhLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AtanhLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.AtanhLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AtanhLayerParams; + })(); + + Specification.PowBroadcastableLayerParams = (function() { + + function PowBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PowBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.PowBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PowBroadcastableLayerParams; + })(); + + Specification.Exp2LayerParams = (function() { + + function Exp2LayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Exp2LayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.Exp2LayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Exp2LayerParams; + })(); + + Specification.WhereNonZeroLayerParams = (function() { + + function WhereNonZeroLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + WhereNonZeroLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.WhereNonZeroLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return WhereNonZeroLayerParams; + })(); + + Specification.MatrixBandPartLayerParams = (function() { + + function MatrixBandPartLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MatrixBandPartLayerParams.prototype.numLower = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + MatrixBandPartLayerParams.prototype.numUpper = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + MatrixBandPartLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.MatrixBandPartLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.numLower = reader.int64(); + break; + case 2: + message.numUpper = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MatrixBandPartLayerParams; + })(); + + Specification.UpperTriangularLayerParams = (function() { + + function UpperTriangularLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + UpperTriangularLayerParams.prototype.k = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + UpperTriangularLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.UpperTriangularLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return UpperTriangularLayerParams; + })(); + + Specification.LowerTriangularLayerParams = (function() { + + function LowerTriangularLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LowerTriangularLayerParams.prototype.k = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + LowerTriangularLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LowerTriangularLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.k = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LowerTriangularLayerParams; + })(); + + Specification.BroadcastToLikeLayerParams = (function() { + + function BroadcastToLikeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BroadcastToLikeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BroadcastToLikeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BroadcastToLikeLayerParams; + })(); + + Specification.BroadcastToStaticLayerParams = (function() { + + function BroadcastToStaticLayerParams(properties) { + this.targetShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BroadcastToStaticLayerParams.prototype.targetShape = $util.emptyArray; + + BroadcastToStaticLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.BroadcastToStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.targetShape && message.targetShape.length)) + message.targetShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.targetShape.push(reader.uint64()); + } else + message.targetShape.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BroadcastToStaticLayerParams; + })(); + + Specification.BroadcastToDynamicLayerParams = (function() { + + function BroadcastToDynamicLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BroadcastToDynamicLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.BroadcastToDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return BroadcastToDynamicLayerParams; + })(); + + Specification.AddBroadcastableLayerParams = (function() { + + function AddBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AddBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.AddBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AddBroadcastableLayerParams; + })(); + + Specification.MaxBroadcastableLayerParams = (function() { + + function MaxBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MaxBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.MaxBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MaxBroadcastableLayerParams; + })(); + + Specification.MinBroadcastableLayerParams = (function() { + + function MinBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MinBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.MinBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MinBroadcastableLayerParams; + })(); + + Specification.ModBroadcastableLayerParams = (function() { + + function ModBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ModBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ModBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ModBroadcastableLayerParams; + })(); + + Specification.FloorDivBroadcastableLayerParams = (function() { + + function FloorDivBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FloorDivBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FloorDivBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FloorDivBroadcastableLayerParams; + })(); + + Specification.SubtractBroadcastableLayerParams = (function() { + + function SubtractBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SubtractBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SubtractBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SubtractBroadcastableLayerParams; + })(); + + Specification.MultiplyBroadcastableLayerParams = (function() { + + function MultiplyBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MultiplyBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.MultiplyBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MultiplyBroadcastableLayerParams; + })(); + + Specification.DivideBroadcastableLayerParams = (function() { + + function DivideBroadcastableLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DivideBroadcastableLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.DivideBroadcastableLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DivideBroadcastableLayerParams; + })(); + + Specification.GatherLayerParams = (function() { + + function GatherLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GatherLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + GatherLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.GatherLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return GatherLayerParams; + })(); + + Specification.ScatterMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "SCATTER_UPDATE"] = 0; + values[valuesById[1] = "SCATTER_ADD"] = 1; + values[valuesById[2] = "SCATTER_SUB"] = 2; + values[valuesById[3] = "SCATTER_MUL"] = 3; + values[valuesById[4] = "SCATTER_DIV"] = 4; + values[valuesById[5] = "SCATTER_MAX"] = 5; + values[valuesById[6] = "SCATTER_MIN"] = 6; + return values; + })(); + + Specification.ScatterLayerParams = (function() { + + function ScatterLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ScatterLayerParams.prototype.axis = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + ScatterLayerParams.prototype.mode = 0; + + ScatterLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ScatterLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ScatterLayerParams; + })(); + + Specification.GatherNDLayerParams = (function() { + + function GatherNDLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GatherNDLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.GatherNDLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return GatherNDLayerParams; + })(); + + Specification.ScatterNDLayerParams = (function() { + + function ScatterNDLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ScatterNDLayerParams.prototype.mode = 0; + + ScatterNDLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ScatterNDLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ScatterNDLayerParams; + })(); + + Specification.GatherAlongAxisLayerParams = (function() { + + function GatherAlongAxisLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GatherAlongAxisLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + GatherAlongAxisLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.GatherAlongAxisLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return GatherAlongAxisLayerParams; + })(); + + Specification.ScatterAlongAxisLayerParams = (function() { + + function ScatterAlongAxisLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ScatterAlongAxisLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ScatterAlongAxisLayerParams.prototype.mode = 0; + + ScatterAlongAxisLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ScatterAlongAxisLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ScatterAlongAxisLayerParams; + })(); + + Specification.StackLayerParams = (function() { + + function StackLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StackLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + StackLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.StackLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return StackLayerParams; + })(); + + Specification.RankPreservingReshapeLayerParams = (function() { + + function RankPreservingReshapeLayerParams(properties) { + this.targetShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RankPreservingReshapeLayerParams.prototype.targetShape = $util.emptyArray; + + RankPreservingReshapeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RankPreservingReshapeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.targetShape && message.targetShape.length)) + message.targetShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.targetShape.push(reader.int64()); + } else + message.targetShape.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RankPreservingReshapeLayerParams; + })(); + + Specification.ConstantPaddingLayerParams = (function() { + + function ConstantPaddingLayerParams(properties) { + this.padAmounts = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ConstantPaddingLayerParams.prototype.value = 0; + ConstantPaddingLayerParams.prototype.padAmounts = $util.emptyArray; + ConstantPaddingLayerParams.prototype.padToGivenOutputSizeMode = false; + + ConstantPaddingLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ConstantPaddingLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.value = reader.float(); + break; + case 2: + if (!(message.padAmounts && message.padAmounts.length)) + message.padAmounts = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.padAmounts.push(reader.uint64()); + } else + message.padAmounts.push(reader.uint64()); + break; + case 3: + message.padToGivenOutputSizeMode = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ConstantPaddingLayerParams; + })(); + + Specification.RandomNormalLikeLayerParams = (function() { + + function RandomNormalLikeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomNormalLikeLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomNormalLikeLayerParams.prototype.mean = 0; + RandomNormalLikeLayerParams.prototype.stdDev = 0; + + RandomNormalLikeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomNormalLikeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.mean = reader.float(); + break; + case 3: + message.stdDev = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomNormalLikeLayerParams; + })(); + + Specification.RandomNormalStaticLayerParams = (function() { + + function RandomNormalStaticLayerParams(properties) { + this.outputShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomNormalStaticLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomNormalStaticLayerParams.prototype.mean = 0; + RandomNormalStaticLayerParams.prototype.stdDev = 0; + RandomNormalStaticLayerParams.prototype.outputShape = $util.emptyArray; + + RandomNormalStaticLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomNormalStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.mean = reader.float(); + break; + case 3: + message.stdDev = reader.float(); + break; + case 4: + if (!(message.outputShape && message.outputShape.length)) + message.outputShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.outputShape.push(reader.uint64()); + } else + message.outputShape.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomNormalStaticLayerParams; + })(); + + Specification.RandomNormalDynamicLayerParams = (function() { + + function RandomNormalDynamicLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomNormalDynamicLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomNormalDynamicLayerParams.prototype.mean = 0; + RandomNormalDynamicLayerParams.prototype.stdDev = 0; + + RandomNormalDynamicLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomNormalDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.mean = reader.float(); + break; + case 3: + message.stdDev = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomNormalDynamicLayerParams; + })(); + + Specification.RandomUniformLikeLayerParams = (function() { + + function RandomUniformLikeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomUniformLikeLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomUniformLikeLayerParams.prototype.minVal = 0; + RandomUniformLikeLayerParams.prototype.maxVal = 0; + + RandomUniformLikeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomUniformLikeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.minVal = reader.float(); + break; + case 3: + message.maxVal = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomUniformLikeLayerParams; + })(); + + Specification.RandomUniformStaticLayerParams = (function() { + + function RandomUniformStaticLayerParams(properties) { + this.outputShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomUniformStaticLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomUniformStaticLayerParams.prototype.minVal = 0; + RandomUniformStaticLayerParams.prototype.maxVal = 0; + RandomUniformStaticLayerParams.prototype.outputShape = $util.emptyArray; + + RandomUniformStaticLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomUniformStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.minVal = reader.float(); + break; + case 3: + message.maxVal = reader.float(); + break; + case 4: + if (!(message.outputShape && message.outputShape.length)) + message.outputShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.outputShape.push(reader.uint64()); + } else + message.outputShape.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomUniformStaticLayerParams; + })(); + + Specification.RandomUniformDynamicLayerParams = (function() { + + function RandomUniformDynamicLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomUniformDynamicLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomUniformDynamicLayerParams.prototype.minVal = 0; + RandomUniformDynamicLayerParams.prototype.maxVal = 0; + + RandomUniformDynamicLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomUniformDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.minVal = reader.float(); + break; + case 3: + message.maxVal = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomUniformDynamicLayerParams; + })(); + + Specification.RandomBernoulliLikeLayerParams = (function() { + + function RandomBernoulliLikeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomBernoulliLikeLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomBernoulliLikeLayerParams.prototype.prob = 0; + + RandomBernoulliLikeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomBernoulliLikeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.prob = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomBernoulliLikeLayerParams; + })(); + + Specification.RandomBernoulliStaticLayerParams = (function() { + + function RandomBernoulliStaticLayerParams(properties) { + this.outputShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomBernoulliStaticLayerParams.prototype.seed = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + RandomBernoulliStaticLayerParams.prototype.prob = 0; + RandomBernoulliStaticLayerParams.prototype.outputShape = $util.emptyArray; + + RandomBernoulliStaticLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomBernoulliStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.prob = reader.float(); + break; + case 3: + if (!(message.outputShape && message.outputShape.length)) + message.outputShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.outputShape.push(reader.uint64()); + } else + message.outputShape.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomBernoulliStaticLayerParams; + })(); + + Specification.RandomBernoulliDynamicLayerParams = (function() { + + function RandomBernoulliDynamicLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RandomBernoulliDynamicLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RandomBernoulliDynamicLayerParams.prototype.prob = 0; + + RandomBernoulliDynamicLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RandomBernoulliDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.prob = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RandomBernoulliDynamicLayerParams; + })(); + + Specification.CategoricalDistributionLayerParams = (function() { + + function CategoricalDistributionLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CategoricalDistributionLayerParams.prototype.seed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + CategoricalDistributionLayerParams.prototype.numSamples = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + CategoricalDistributionLayerParams.prototype.isLogits = false; + CategoricalDistributionLayerParams.prototype.eps = 0; + CategoricalDistributionLayerParams.prototype.temperature = 0; + + CategoricalDistributionLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CategoricalDistributionLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.seed = reader.int64(); + break; + case 2: + message.numSamples = reader.int64(); + break; + case 3: + message.isLogits = reader.bool(); + break; + case 4: + message.eps = reader.float(); + break; + case 5: + message.temperature = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CategoricalDistributionLayerParams; + })(); + + Specification.ReduceL1LayerParams = (function() { + + function ReduceL1LayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceL1LayerParams.prototype.axes = $util.emptyArray; + ReduceL1LayerParams.prototype.keepDims = false; + ReduceL1LayerParams.prototype.reduceAll = false; + + ReduceL1LayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceL1LayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceL1LayerParams; + })(); + + Specification.ReduceL2LayerParams = (function() { + + function ReduceL2LayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceL2LayerParams.prototype.axes = $util.emptyArray; + ReduceL2LayerParams.prototype.keepDims = false; + ReduceL2LayerParams.prototype.reduceAll = false; + + ReduceL2LayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceL2LayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceL2LayerParams; + })(); + + Specification.ReduceMaxLayerParams = (function() { + + function ReduceMaxLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceMaxLayerParams.prototype.axes = $util.emptyArray; + ReduceMaxLayerParams.prototype.keepDims = false; + ReduceMaxLayerParams.prototype.reduceAll = false; + + ReduceMaxLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceMaxLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceMaxLayerParams; + })(); + + Specification.ReduceMinLayerParams = (function() { + + function ReduceMinLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceMinLayerParams.prototype.axes = $util.emptyArray; + ReduceMinLayerParams.prototype.keepDims = false; + ReduceMinLayerParams.prototype.reduceAll = false; + + ReduceMinLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceMinLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceMinLayerParams; + })(); + + Specification.ReduceSumLayerParams = (function() { + + function ReduceSumLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceSumLayerParams.prototype.axes = $util.emptyArray; + ReduceSumLayerParams.prototype.keepDims = false; + ReduceSumLayerParams.prototype.reduceAll = false; + + ReduceSumLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceSumLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceSumLayerParams; + })(); + + Specification.ReduceProdLayerParams = (function() { + + function ReduceProdLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceProdLayerParams.prototype.axes = $util.emptyArray; + ReduceProdLayerParams.prototype.keepDims = false; + ReduceProdLayerParams.prototype.reduceAll = false; + + ReduceProdLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceProdLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceProdLayerParams; + })(); + + Specification.ReduceMeanLayerParams = (function() { + + function ReduceMeanLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceMeanLayerParams.prototype.axes = $util.emptyArray; + ReduceMeanLayerParams.prototype.keepDims = false; + ReduceMeanLayerParams.prototype.reduceAll = false; + + ReduceMeanLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceMeanLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceMeanLayerParams; + })(); + + Specification.ReduceLogSumLayerParams = (function() { + + function ReduceLogSumLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceLogSumLayerParams.prototype.axes = $util.emptyArray; + ReduceLogSumLayerParams.prototype.keepDims = false; + ReduceLogSumLayerParams.prototype.reduceAll = false; + + ReduceLogSumLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceLogSumLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceLogSumLayerParams; + })(); + + Specification.ReduceSumSquareLayerParams = (function() { + + function ReduceSumSquareLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceSumSquareLayerParams.prototype.axes = $util.emptyArray; + ReduceSumSquareLayerParams.prototype.keepDims = false; + ReduceSumSquareLayerParams.prototype.reduceAll = false; + + ReduceSumSquareLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceSumSquareLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceSumSquareLayerParams; + })(); + + Specification.ReduceLogSumExpLayerParams = (function() { + + function ReduceLogSumExpLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReduceLogSumExpLayerParams.prototype.axes = $util.emptyArray; + ReduceLogSumExpLayerParams.prototype.keepDims = false; + ReduceLogSumExpLayerParams.prototype.reduceAll = false; + + ReduceLogSumExpLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReduceLogSumExpLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.keepDims = reader.bool(); + break; + case 3: + message.reduceAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReduceLogSumExpLayerParams; + })(); + + Specification.ExpandDimsLayerParams = (function() { + + function ExpandDimsLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ExpandDimsLayerParams.prototype.axes = $util.emptyArray; + + ExpandDimsLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ExpandDimsLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ExpandDimsLayerParams; + })(); + + Specification.FlattenTo2DLayerParams = (function() { + + function FlattenTo2DLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FlattenTo2DLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + FlattenTo2DLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.FlattenTo2DLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FlattenTo2DLayerParams; + })(); + + Specification.ReshapeStaticLayerParams = (function() { + + function ReshapeStaticLayerParams(properties) { + this.targetShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReshapeStaticLayerParams.prototype.targetShape = $util.emptyArray; + + ReshapeStaticLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReshapeStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.targetShape && message.targetShape.length)) + message.targetShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.targetShape.push(reader.int64()); + } else + message.targetShape.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReshapeStaticLayerParams; + })(); + + Specification.ReshapeLikeLayerParams = (function() { + + function ReshapeLikeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReshapeLikeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReshapeLikeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReshapeLikeLayerParams; + })(); + + Specification.ReshapeDynamicLayerParams = (function() { + + function ReshapeDynamicLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReshapeDynamicLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ReshapeDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReshapeDynamicLayerParams; + })(); + + Specification.SqueezeLayerParams = (function() { + + function SqueezeLayerParams(properties) { + this.axes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SqueezeLayerParams.prototype.axes = $util.emptyArray; + SqueezeLayerParams.prototype.squeezeAll = false; + + SqueezeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SqueezeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.axes && message.axes.length)) + message.axes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.axes.push(reader.int64()); + } else + message.axes.push(reader.int64()); + break; + case 2: + message.squeezeAll = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SqueezeLayerParams; + })(); + + Specification.TopKLayerParams = (function() { + + function TopKLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TopKLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + TopKLayerParams.prototype.K = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + TopKLayerParams.prototype.useBottomK = false; + + TopKLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TopKLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.K = reader.uint64(); + break; + case 3: + message.useBottomK = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TopKLayerParams; + })(); + + Specification.ArgMaxLayerParams = (function() { + + function ArgMaxLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArgMaxLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ArgMaxLayerParams.prototype.removeDim = false; + + ArgMaxLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ArgMaxLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.removeDim = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ArgMaxLayerParams; + })(); + + Specification.ArgMinLayerParams = (function() { + + function ArgMinLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArgMinLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ArgMinLayerParams.prototype.removeDim = false; + + ArgMinLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ArgMinLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.removeDim = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ArgMinLayerParams; + })(); + + Specification.SplitNDLayerParams = (function() { + + function SplitNDLayerParams(properties) { + this.splitSizes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SplitNDLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + SplitNDLayerParams.prototype.numSplits = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + SplitNDLayerParams.prototype.splitSizes = $util.emptyArray; + + SplitNDLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SplitNDLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.numSplits = reader.uint64(); + break; + case 3: + if (!(message.splitSizes && message.splitSizes.length)) + message.splitSizes = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.splitSizes.push(reader.uint64()); + } else + message.splitSizes.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SplitNDLayerParams; + })(); + + Specification.CeilLayerParams = (function() { + + function CeilLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CeilLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.CeilLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CeilLayerParams; + })(); + + Specification.RoundLayerParams = (function() { + + function RoundLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RoundLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RoundLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RoundLayerParams; + })(); + + Specification.FloorLayerParams = (function() { + + function FloorLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FloorLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.FloorLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return FloorLayerParams; + })(); + + Specification.SignLayerParams = (function() { + + function SignLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SignLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SignLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SignLayerParams; + })(); + + Specification.ClipLayerParams = (function() { + + function ClipLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ClipLayerParams.prototype.minVal = 0; + ClipLayerParams.prototype.maxVal = 0; + + ClipLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ClipLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.minVal = reader.float(); + break; + case 2: + message.maxVal = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ClipLayerParams; + })(); + + Specification.SliceStaticLayerParams = (function() { + + function SliceStaticLayerParams(properties) { + this.beginIds = []; + this.beginMasks = []; + this.endIds = []; + this.endMasks = []; + this.strides = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SliceStaticLayerParams.prototype.beginIds = $util.emptyArray; + SliceStaticLayerParams.prototype.beginMasks = $util.emptyArray; + SliceStaticLayerParams.prototype.endIds = $util.emptyArray; + SliceStaticLayerParams.prototype.endMasks = $util.emptyArray; + SliceStaticLayerParams.prototype.strides = $util.emptyArray; + + SliceStaticLayerParams.decode = function decode(reader, length) { + if (!(reader 
instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SliceStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.beginIds && message.beginIds.length)) + message.beginIds = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.beginIds.push(reader.int64()); + } else + message.beginIds.push(reader.int64()); + break; + case 2: + if (!(message.beginMasks && message.beginMasks.length)) + message.beginMasks = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.beginMasks.push(reader.bool()); + } else + message.beginMasks.push(reader.bool()); + break; + case 3: + if (!(message.endIds && message.endIds.length)) + message.endIds = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.endIds.push(reader.int64()); + } else + message.endIds.push(reader.int64()); + break; + case 4: + if (!(message.endMasks && message.endMasks.length)) + message.endMasks = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.endMasks.push(reader.bool()); + } else + message.endMasks.push(reader.bool()); + break; + case 5: + if (!(message.strides && message.strides.length)) + message.strides = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.strides.push(reader.int64()); + } else + message.strides.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SliceStaticLayerParams; + })(); + + Specification.SliceDynamicLayerParams = (function() { + + function SliceDynamicLayerParams(properties) { + this.beginMasks = []; + this.endIds = []; + this.endMasks = []; + 
this.strides = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SliceDynamicLayerParams.prototype.beginMasks = $util.emptyArray; + SliceDynamicLayerParams.prototype.endIds = $util.emptyArray; + SliceDynamicLayerParams.prototype.endMasks = $util.emptyArray; + SliceDynamicLayerParams.prototype.strides = $util.emptyArray; + + SliceDynamicLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SliceDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + if (!(message.beginMasks && message.beginMasks.length)) + message.beginMasks = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.beginMasks.push(reader.bool()); + } else + message.beginMasks.push(reader.bool()); + break; + case 3: + if (!(message.endIds && message.endIds.length)) + message.endIds = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.endIds.push(reader.int64()); + } else + message.endIds.push(reader.int64()); + break; + case 4: + if (!(message.endMasks && message.endMasks.length)) + message.endMasks = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.endMasks.push(reader.bool()); + } else + message.endMasks.push(reader.bool()); + break; + case 5: + if (!(message.strides && message.strides.length)) + message.strides = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.strides.push(reader.int64()); + } else + message.strides.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + 
return message; + }; + + return SliceDynamicLayerParams; + })(); + + Specification.TileLayerParams = (function() { + + function TileLayerParams(properties) { + this.reps = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TileLayerParams.prototype.reps = $util.emptyArray; + + TileLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.TileLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.reps && message.reps.length)) + message.reps = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.reps.push(reader.uint64()); + } else + message.reps.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TileLayerParams; + })(); + + Specification.GetShapeLayerParams = (function() { + + function GetShapeLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GetShapeLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.GetShapeLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return GetShapeLayerParams; + })(); + + Specification.ErfLayerParams = (function() { + + function ErfLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ErfLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.ErfLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ErfLayerParams; + })(); + + Specification.GeluLayerParams = (function() { + + function GeluLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GeluLayerParams.prototype.mode = 0; + + GeluLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.GeluLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.mode = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + GeluLayerParams.GeluMode = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "EXACT"] = 0; + values[valuesById[1] = "TANH_APPROXIMATION"] = 1; + values[valuesById[2] = "SIGMOID_APPROXIMATION"] = 2; + return values; + })(); + + return GeluLayerParams; + })(); + + Specification.RangeStaticLayerParams = (function() { + + function RangeStaticLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RangeStaticLayerParams.prototype.endValue = 0; + RangeStaticLayerParams.prototype.startValue = 0; + RangeStaticLayerParams.prototype.stepSizeValue = 0; + + RangeStaticLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RangeStaticLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.endValue = reader.float(); + break; + case 2: + message.startValue = reader.float(); + break; + case 3: + message.stepSizeValue = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RangeStaticLayerParams; + })(); + + Specification.RangeDynamicLayerParams = (function() { + + function RangeDynamicLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RangeDynamicLayerParams.prototype.startValue = 0; + RangeDynamicLayerParams.prototype.stepSizeValue = 0; + + RangeDynamicLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.RangeDynamicLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.startValue = reader.float(); + break; + case 3: + message.stepSizeValue = reader.float(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RangeDynamicLayerParams; + })(); + + Specification.SlidingWindowsLayerParams = (function() { + + function SlidingWindowsLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SlidingWindowsLayerParams.prototype.axis = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + SlidingWindowsLayerParams.prototype.windowSize = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + SlidingWindowsLayerParams.prototype.step = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + SlidingWindowsLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SlidingWindowsLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.axis = reader.int64(); + break; + case 2: + message.windowSize = reader.uint64(); + break; + case 3: + message.step = reader.uint64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SlidingWindowsLayerParams; + })(); + + Specification.LayerNormalizationLayerParams = (function() { + + function LayerNormalizationLayerParams(properties) { + this.normalizedShape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LayerNormalizationLayerParams.prototype.normalizedShape = $util.emptyArray; + LayerNormalizationLayerParams.prototype.eps = 0; + LayerNormalizationLayerParams.prototype.gamma = null; + LayerNormalizationLayerParams.prototype.beta = null; + + LayerNormalizationLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LayerNormalizationLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.normalizedShape && message.normalizedShape.length)) + message.normalizedShape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.normalizedShape.push(reader.int64()); + } else + message.normalizedShape.push(reader.int64()); + break; + case 2: + message.eps = reader.float(); + break; + case 3: + message.gamma = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + case 4: + message.beta = $root.CoreML.Specification.WeightParams.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LayerNormalizationLayerParams; + })(); + + Specification.NonMaximumSuppressionLayerParams = (function() { + + function NonMaximumSuppressionLayerParams(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NonMaximumSuppressionLayerParams.prototype.iouThreshold = 0; + NonMaximumSuppressionLayerParams.prototype.scoreThreshold = 0; + NonMaximumSuppressionLayerParams.prototype.maxBoxes = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + NonMaximumSuppressionLayerParams.prototype.perClassSuppression = false; + + NonMaximumSuppressionLayerParams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NonMaximumSuppressionLayerParams(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.iouThreshold = reader.float(); + break; + case 2: + message.scoreThreshold = reader.float(); + break; + case 3: + message.maxBoxes = reader.uint64(); + break; + case 4: + message.perClassSuppression = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NonMaximumSuppressionLayerParams; + })(); + + Specification.NeuralNetworkClassifier = (function() { + + function NeuralNetworkClassifier(properties) { + this.layers = []; + this.preprocessing = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NeuralNetworkClassifier.prototype.layers = $util.emptyArray; + NeuralNetworkClassifier.prototype.preprocessing = $util.emptyArray; + NeuralNetworkClassifier.prototype.arrayInputShapeMapping = 0; + NeuralNetworkClassifier.prototype.imageInputShapeMapping = 0; + NeuralNetworkClassifier.prototype.updateParams = null; + NeuralNetworkClassifier.prototype.stringClassLabels = null; + NeuralNetworkClassifier.prototype.int64ClassLabels = null; + NeuralNetworkClassifier.prototype.labelProbabilityLayerName = ""; + + var $oneOfFields; + + Object.defineProperty(NeuralNetworkClassifier.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels", "int64ClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + NeuralNetworkClassifier.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NeuralNetworkClassifier(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.layers && message.layers.length)) + message.layers = []; + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.preprocessing && message.preprocessing.length)) + message.preprocessing = []; + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decode(reader, reader.uint32())); + break; + case 5: + message.arrayInputShapeMapping = reader.int32(); + break; + case 6: + message.imageInputShapeMapping = reader.int32(); + break; + case 10: + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decode(reader, reader.uint32()); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 200: + message.labelProbabilityLayerName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NeuralNetworkClassifier; + })(); + + Specification.NeuralNetworkRegressor = (function() { + + function NeuralNetworkRegressor(properties) { + this.layers = []; + this.preprocessing = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NeuralNetworkRegressor.prototype.layers = $util.emptyArray; + NeuralNetworkRegressor.prototype.preprocessing = $util.emptyArray; + NeuralNetworkRegressor.prototype.arrayInputShapeMapping = 0; + NeuralNetworkRegressor.prototype.imageInputShapeMapping = 0; + NeuralNetworkRegressor.prototype.updateParams = null; + + NeuralNetworkRegressor.decode = 
function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.NeuralNetworkRegressor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.layers && message.layers.length)) + message.layers = []; + message.layers.push($root.CoreML.Specification.NeuralNetworkLayer.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.preprocessing && message.preprocessing.length)) + message.preprocessing = []; + message.preprocessing.push($root.CoreML.Specification.NeuralNetworkPreprocessing.decode(reader, reader.uint32())); + break; + case 5: + message.arrayInputShapeMapping = reader.int32(); + break; + case 6: + message.imageInputShapeMapping = reader.int32(); + break; + case 10: + message.updateParams = $root.CoreML.Specification.NetworkUpdateParameters.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NeuralNetworkRegressor; + })(); + + Specification.NetworkUpdateParameters = (function() { + + function NetworkUpdateParameters(properties) { + this.lossLayers = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NetworkUpdateParameters.prototype.lossLayers = $util.emptyArray; + NetworkUpdateParameters.prototype.optimizer = null; + NetworkUpdateParameters.prototype.epochs = null; + NetworkUpdateParameters.prototype.shuffle = null; + NetworkUpdateParameters.prototype.seed = null; + + NetworkUpdateParameters.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.NetworkUpdateParameters(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.lossLayers && message.lossLayers.length)) + message.lossLayers = []; + message.lossLayers.push($root.CoreML.Specification.LossLayer.decode(reader, reader.uint32())); + break; + case 2: + message.optimizer = $root.CoreML.Specification.Optimizer.decode(reader, reader.uint32()); + break; + case 3: + message.epochs = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 10: + message.shuffle = $root.CoreML.Specification.BoolParameter.decode(reader, reader.uint32()); + break; + case 20: + message.seed = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return NetworkUpdateParameters; + })(); + + Specification.LossLayer = (function() { + + function LossLayer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LossLayer.prototype.name = ""; + LossLayer.prototype.categoricalCrossEntropyLossLayer = null; + LossLayer.prototype.meanSquaredErrorLossLayer = null; + + var $oneOfFields; + + Object.defineProperty(LossLayer.prototype, "LossLayerType", { + get: $util.oneOfGetter($oneOfFields = ["categoricalCrossEntropyLossLayer", "meanSquaredErrorLossLayer"]), + set: $util.oneOfSetter($oneOfFields) + }); + + LossLayer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LossLayer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 10: + message.categoricalCrossEntropyLossLayer = $root.CoreML.Specification.CategoricalCrossEntropyLossLayer.decode(reader, reader.uint32()); + break; + case 11: + message.meanSquaredErrorLossLayer = $root.CoreML.Specification.MeanSquaredErrorLossLayer.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LossLayer; + })(); + + Specification.CategoricalCrossEntropyLossLayer = (function() { + + function CategoricalCrossEntropyLossLayer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CategoricalCrossEntropyLossLayer.prototype.input = ""; + CategoricalCrossEntropyLossLayer.prototype.target = ""; + + CategoricalCrossEntropyLossLayer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.CategoricalCrossEntropyLossLayer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input = reader.string(); + break; + case 2: + message.target = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return CategoricalCrossEntropyLossLayer; + })(); + + Specification.MeanSquaredErrorLossLayer = (function() { + + function MeanSquaredErrorLossLayer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MeanSquaredErrorLossLayer.prototype.input = ""; + MeanSquaredErrorLossLayer.prototype.target = ""; + + MeanSquaredErrorLossLayer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.MeanSquaredErrorLossLayer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.input = reader.string(); + break; + case 2: + message.target = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return MeanSquaredErrorLossLayer; + })(); + + Specification.Optimizer = (function() { + + function Optimizer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Optimizer.prototype.sgdOptimizer = null; + Optimizer.prototype.adamOptimizer = null; + + var $oneOfFields; + + Object.defineProperty(Optimizer.prototype, "OptimizerType", { + get: $util.oneOfGetter($oneOfFields = ["sgdOptimizer", "adamOptimizer"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Optimizer.decode = function 
decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.Optimizer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 10: + message.sgdOptimizer = $root.CoreML.Specification.SGDOptimizer.decode(reader, reader.uint32()); + break; + case 11: + message.adamOptimizer = $root.CoreML.Specification.AdamOptimizer.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Optimizer; + })(); + + Specification.SGDOptimizer = (function() { + + function SGDOptimizer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SGDOptimizer.prototype.learningRate = null; + SGDOptimizer.prototype.miniBatchSize = null; + SGDOptimizer.prototype.momentum = null; + + SGDOptimizer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SGDOptimizer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.learningRate = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 2: + message.miniBatchSize = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 3: + message.momentum = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SGDOptimizer; + })(); + + Specification.AdamOptimizer = (function() { + + function AdamOptimizer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AdamOptimizer.prototype.learningRate = null; + AdamOptimizer.prototype.miniBatchSize = null; + AdamOptimizer.prototype.beta1 = null; + AdamOptimizer.prototype.beta2 = null; + AdamOptimizer.prototype.eps = null; + + AdamOptimizer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.AdamOptimizer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.learningRate = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 2: + message.miniBatchSize = $root.CoreML.Specification.Int64Parameter.decode(reader, reader.uint32()); + break; + case 3: + message.beta1 = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 4: + message.beta2 = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + case 5: + message.eps = $root.CoreML.Specification.DoubleParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return AdamOptimizer; + })(); + + Specification.Normalizer = (function() { + + function Normalizer(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Normalizer.prototype.normType = 0; + + Normalizer.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Normalizer(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.normType = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Normalizer.NormType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "LMax"] = 0; + values[valuesById[1] = "L1"] = 1; + values[valuesById[2] = "L2"] = 2; + return values; + })(); + + return Normalizer; + })(); + + Specification.OneHotEncoder = (function() { + + function OneHotEncoder(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OneHotEncoder.prototype.stringCategories = null; + OneHotEncoder.prototype.int64Categories = null; + OneHotEncoder.prototype.outputSparse = false; + OneHotEncoder.prototype.handleUnknown = 0; + + var $oneOfFields; + + Object.defineProperty(OneHotEncoder.prototype, "CategoryType", { + get: $util.oneOfGetter($oneOfFields = ["stringCategories", "int64Categories"]), + set: $util.oneOfSetter($oneOfFields) + }); + + OneHotEncoder.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.OneHotEncoder(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.stringCategories = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 2: + message.int64Categories = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 10: + message.outputSparse = reader.bool(); + break; + case 11: + message.handleUnknown = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OneHotEncoder.HandleUnknown = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "ErrorOnUnknown"] = 0; + values[valuesById[1] = "IgnoreUnknown"] = 1; + return values; + })(); + + return OneHotEncoder; + })(); + + Specification.Scaler = (function() { + + function Scaler(properties) { + this.shiftValue = []; + this.scaleValue = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Scaler.prototype.shiftValue = $util.emptyArray; + Scaler.prototype.scaleValue = $util.emptyArray; + + Scaler.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Scaler(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.shiftValue && message.shiftValue.length)) + message.shiftValue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.shiftValue.push(reader.double()); + } else + message.shiftValue.push(reader.double()); + break; + case 2: + if (!(message.scaleValue && message.scaleValue.length)) + message.scaleValue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.scaleValue.push(reader.double()); + } else + message.scaleValue.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Scaler; + })(); + + Specification.NonMaximumSuppression = (function() { + + function NonMaximumSuppression(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NonMaximumSuppression.prototype.pickTop = null; + NonMaximumSuppression.prototype.stringClassLabels = null; + NonMaximumSuppression.prototype.int64ClassLabels = null; + NonMaximumSuppression.prototype.iouThreshold = 0; + NonMaximumSuppression.prototype.confidenceThreshold = 0; + NonMaximumSuppression.prototype.confidenceInputFeatureName = ""; + NonMaximumSuppression.prototype.coordinatesInputFeatureName = ""; + NonMaximumSuppression.prototype.iouThresholdInputFeatureName = ""; + NonMaximumSuppression.prototype.confidenceThresholdInputFeatureName = ""; + NonMaximumSuppression.prototype.confidenceOutputFeatureName = ""; + NonMaximumSuppression.prototype.coordinatesOutputFeatureName = ""; + + var $oneOfFields; + + Object.defineProperty(NonMaximumSuppression.prototype, "SuppressionMethod", { + get: $util.oneOfGetter($oneOfFields = ["pickTop"]), + 
set: $util.oneOfSetter($oneOfFields) + }); + + Object.defineProperty(NonMaximumSuppression.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels", "int64ClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + NonMaximumSuppression.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.NonMaximumSuppression(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.pickTop = $root.CoreML.Specification.NonMaximumSuppression.PickTop.decode(reader, reader.uint32()); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 110: + message.iouThreshold = reader.double(); + break; + case 111: + message.confidenceThreshold = reader.double(); + break; + case 200: + message.confidenceInputFeatureName = reader.string(); + break; + case 201: + message.coordinatesInputFeatureName = reader.string(); + break; + case 202: + message.iouThresholdInputFeatureName = reader.string(); + break; + case 203: + message.confidenceThresholdInputFeatureName = reader.string(); + break; + case 210: + message.confidenceOutputFeatureName = reader.string(); + break; + case 211: + message.coordinatesOutputFeatureName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NonMaximumSuppression.PickTop = (function() { + + function PickTop(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PickTop.prototype.perClass = false; + + PickTop.decode = function 
decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.NonMaximumSuppression.PickTop(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.perClass = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PickTop; + })(); + + return NonMaximumSuppression; + })(); + + Specification.LinearKernel = (function() { + + function LinearKernel(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LinearKernel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LinearKernel(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LinearKernel; + })(); + + Specification.RBFKernel = (function() { + + function RBFKernel(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + RBFKernel.prototype.gamma = 0; + + RBFKernel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.RBFKernel(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gamma = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return RBFKernel; + })(); + + Specification.PolyKernel = (function() { + + function PolyKernel(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PolyKernel.prototype.degree = 0; + PolyKernel.prototype.c = 0; + PolyKernel.prototype.gamma = 0; + + PolyKernel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.PolyKernel(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.degree = reader.int32(); + break; + case 2: + message.c = reader.double(); + break; + case 3: + message.gamma = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return PolyKernel; + })(); + + Specification.SigmoidKernel = (function() { + + function SigmoidKernel(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SigmoidKernel.prototype.gamma = 0; + SigmoidKernel.prototype.c = 0; + + SigmoidKernel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SigmoidKernel(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.gamma = reader.double(); + break; + case 2: + message.c = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SigmoidKernel; + })(); + + Specification.Kernel = (function() { + + function Kernel(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Kernel.prototype.linearKernel = null; + Kernel.prototype.rbfKernel = null; + Kernel.prototype.polyKernel = null; + Kernel.prototype.sigmoidKernel = null; + + var $oneOfFields; + + Object.defineProperty(Kernel.prototype, "kernel", { + get: $util.oneOfGetter($oneOfFields = ["linearKernel", "rbfKernel", "polyKernel", "sigmoidKernel"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Kernel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Kernel(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.linearKernel = $root.CoreML.Specification.LinearKernel.decode(reader, reader.uint32()); + break; + case 2: + message.rbfKernel = $root.CoreML.Specification.RBFKernel.decode(reader, reader.uint32()); + break; + case 3: + message.polyKernel = $root.CoreML.Specification.PolyKernel.decode(reader, reader.uint32()); + break; + case 4: + message.sigmoidKernel = $root.CoreML.Specification.SigmoidKernel.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Kernel; + })(); + + Specification.SparseNode = (function() { + + function SparseNode(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SparseNode.prototype.index = 0; + SparseNode.prototype.value = 0; + + SparseNode.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SparseNode(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.index = reader.int32(); + break; + case 2: + message.value = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SparseNode; + })(); + + Specification.SparseVector = (function() { + + function SparseVector(properties) { + this.nodes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SparseVector.prototype.nodes = $util.emptyArray; + + SparseVector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SparseVector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.nodes && message.nodes.length)) + message.nodes = []; + message.nodes.push($root.CoreML.Specification.SparseNode.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SparseVector; + })(); + + Specification.SparseSupportVectors = (function() { + + function SparseSupportVectors(properties) { + this.vectors = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SparseSupportVectors.prototype.vectors = $util.emptyArray; + + SparseSupportVectors.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SparseSupportVectors(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.vectors && message.vectors.length)) + message.vectors = []; + message.vectors.push($root.CoreML.Specification.SparseVector.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SparseSupportVectors; + })(); + + Specification.DenseVector = (function() { + + function DenseVector(properties) { + this.values = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DenseVector.prototype.values = $util.emptyArray; + + DenseVector.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.DenseVector(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.values && message.values.length)) + message.values = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.values.push(reader.double()); + } else + message.values.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DenseVector; + })(); + + Specification.DenseSupportVectors = (function() { + + function DenseSupportVectors(properties) { + this.vectors = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DenseSupportVectors.prototype.vectors = $util.emptyArray; + + DenseSupportVectors.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + 
reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.DenseSupportVectors(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.vectors && message.vectors.length)) + message.vectors = []; + message.vectors.push($root.CoreML.Specification.DenseVector.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return DenseSupportVectors; + })(); + + Specification.Coefficients = (function() { + + function Coefficients(properties) { + this.alpha = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Coefficients.prototype.alpha = $util.emptyArray; + + Coefficients.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.Coefficients(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.alpha && message.alpha.length)) + message.alpha = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.alpha.push(reader.double()); + } else + message.alpha.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Coefficients; + })(); + + Specification.SupportVectorRegressor = (function() { + + function SupportVectorRegressor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SupportVectorRegressor.prototype.kernel = null; + SupportVectorRegressor.prototype.sparseSupportVectors = null; + SupportVectorRegressor.prototype.denseSupportVectors = null; + SupportVectorRegressor.prototype.coefficients = null; + SupportVectorRegressor.prototype.rho = 0; + + var $oneOfFields; + + Object.defineProperty(SupportVectorRegressor.prototype, "supportVectors", { + get: $util.oneOfGetter($oneOfFields = ["sparseSupportVectors", "denseSupportVectors"]), + set: $util.oneOfSetter($oneOfFields) + }); + + SupportVectorRegressor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.SupportVectorRegressor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.CoreML.Specification.Kernel.decode(reader, reader.uint32()); + break; + case 2: + message.sparseSupportVectors = $root.CoreML.Specification.SparseSupportVectors.decode(reader, reader.uint32()); + break; + case 3: + message.denseSupportVectors = $root.CoreML.Specification.DenseSupportVectors.decode(reader, reader.uint32()); + break; + case 4: + message.coefficients = $root.CoreML.Specification.Coefficients.decode(reader, reader.uint32()); + break; + case 5: + message.rho = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SupportVectorRegressor; + })(); + + Specification.SupportVectorClassifier = (function() { + + function SupportVectorClassifier(properties) { + this.numberOfSupportVectorsPerClass = []; + this.coefficients = []; + this.rho = []; + this.probA = []; + this.probB = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SupportVectorClassifier.prototype.kernel = null; + SupportVectorClassifier.prototype.numberOfSupportVectorsPerClass = $util.emptyArray; + SupportVectorClassifier.prototype.sparseSupportVectors = null; + SupportVectorClassifier.prototype.denseSupportVectors = null; + SupportVectorClassifier.prototype.coefficients = $util.emptyArray; + SupportVectorClassifier.prototype.rho = $util.emptyArray; + SupportVectorClassifier.prototype.probA = $util.emptyArray; + SupportVectorClassifier.prototype.probB = $util.emptyArray; + SupportVectorClassifier.prototype.stringClassLabels = null; + SupportVectorClassifier.prototype.int64ClassLabels = null; + + var $oneOfFields; + + Object.defineProperty(SupportVectorClassifier.prototype, "supportVectors", { + get: 
$util.oneOfGetter($oneOfFields = ["sparseSupportVectors", "denseSupportVectors"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Object.defineProperty(SupportVectorClassifier.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels", "int64ClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + SupportVectorClassifier.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.SupportVectorClassifier(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.kernel = $root.CoreML.Specification.Kernel.decode(reader, reader.uint32()); + break; + case 2: + if (!(message.numberOfSupportVectorsPerClass && message.numberOfSupportVectorsPerClass.length)) + message.numberOfSupportVectorsPerClass = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.numberOfSupportVectorsPerClass.push(reader.int32()); + } else + message.numberOfSupportVectorsPerClass.push(reader.int32()); + break; + case 3: + message.sparseSupportVectors = $root.CoreML.Specification.SparseSupportVectors.decode(reader, reader.uint32()); + break; + case 4: + message.denseSupportVectors = $root.CoreML.Specification.DenseSupportVectors.decode(reader, reader.uint32()); + break; + case 5: + if (!(message.coefficients && message.coefficients.length)) + message.coefficients = []; + message.coefficients.push($root.CoreML.Specification.Coefficients.decode(reader, reader.uint32())); + break; + case 6: + if (!(message.rho && message.rho.length)) + message.rho = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.rho.push(reader.double()); + } else + message.rho.push(reader.double()); + break; + case 7: + if (!(message.probA && message.probA.length)) + 
message.probA = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.probA.push(reader.double()); + } else + message.probA.push(reader.double()); + break; + case 8: + if (!(message.probB && message.probB.length)) + message.probB = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.probB.push(reader.double()); + } else + message.probB.push(reader.double()); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SupportVectorClassifier; + })(); + + Specification.TreeEnsemblePostEvaluationTransform = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NoTransform"] = 0; + values[valuesById[1] = "Classification_SoftMax"] = 1; + values[valuesById[2] = "Regression_Logistic"] = 2; + values[valuesById[3] = "Classification_SoftMaxWithZeroClassReference"] = 3; + return values; + })(); + + Specification.TreeEnsembleParameters = (function() { + + function TreeEnsembleParameters(properties) { + this.nodes = []; + this.basePredictionValue = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TreeEnsembleParameters.prototype.nodes = $util.emptyArray; + TreeEnsembleParameters.prototype.numPredictionDimensions = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + TreeEnsembleParameters.prototype.basePredictionValue = $util.emptyArray; + + TreeEnsembleParameters.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TreeEnsembleParameters(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.nodes && message.nodes.length)) + message.nodes = []; + message.nodes.push($root.CoreML.Specification.TreeEnsembleParameters.TreeNode.decode(reader, reader.uint32())); + break; + case 2: + message.numPredictionDimensions = reader.uint64(); + break; + case 3: + if (!(message.basePredictionValue && message.basePredictionValue.length)) + message.basePredictionValue = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.basePredictionValue.push(reader.double()); + } else + message.basePredictionValue.push(reader.double()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TreeEnsembleParameters.TreeNode = (function() { + + function TreeNode(properties) { + this.evaluationInfo = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TreeNode.prototype.treeId = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + TreeNode.prototype.nodeId = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + TreeNode.prototype.nodeBehavior = 0; + TreeNode.prototype.branchFeatureIndex = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + TreeNode.prototype.branchFeatureValue = 0; + TreeNode.prototype.trueChildNodeId = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + TreeNode.prototype.falseChildNodeId = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + TreeNode.prototype.missingValueTracksTrueChild = false; + TreeNode.prototype.evaluationInfo = $util.emptyArray; + TreeNode.prototype.relativeHitRate = 0; + + TreeNode.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TreeEnsembleParameters.TreeNode(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.treeId = reader.uint64(); + break; + case 2: + message.nodeId = reader.uint64(); + break; + case 3: + message.nodeBehavior = reader.int32(); + break; + case 10: + message.branchFeatureIndex = reader.uint64(); + break; + case 11: + message.branchFeatureValue = reader.double(); + break; + case 12: + message.trueChildNodeId = reader.uint64(); + break; + case 13: + message.falseChildNodeId = reader.uint64(); + break; + case 14: + message.missingValueTracksTrueChild = reader.bool(); + break; + case 20: + if (!(message.evaluationInfo && message.evaluationInfo.length)) + message.evaluationInfo = []; + message.evaluationInfo.push($root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo.decode(reader, reader.uint32())); + break; + case 30: + message.relativeHitRate = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TreeNode.TreeNodeBehavior = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "BranchOnValueLessThanEqual"] = 0; + values[valuesById[1] = "BranchOnValueLessThan"] = 1; + values[valuesById[2] = "BranchOnValueGreaterThanEqual"] = 2; + values[valuesById[3] = "BranchOnValueGreaterThan"] = 3; + values[valuesById[4] = "BranchOnValueEqual"] = 4; + values[valuesById[5] = "BranchOnValueNotEqual"] = 5; + values[valuesById[6] = "LeafNode"] = 6; + return values; + })(); + + TreeNode.EvaluationInfo = (function() { + + function EvaluationInfo(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + EvaluationInfo.prototype.evaluationIndex = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + EvaluationInfo.prototype.evaluationValue = 0; + + EvaluationInfo.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.TreeEnsembleParameters.TreeNode.EvaluationInfo(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.evaluationIndex = reader.uint64(); + break; + case 2: + message.evaluationValue = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return EvaluationInfo; + })(); + + return TreeNode; + })(); + + return TreeEnsembleParameters; + })(); + + Specification.TreeEnsembleClassifier = (function() { + + function TreeEnsembleClassifier(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TreeEnsembleClassifier.prototype.treeEnsemble = null; + TreeEnsembleClassifier.prototype.postEvaluationTransform = 0; + TreeEnsembleClassifier.prototype.stringClassLabels = null; + TreeEnsembleClassifier.prototype.int64ClassLabels = null; + + var $oneOfFields; + + Object.defineProperty(TreeEnsembleClassifier.prototype, "ClassLabels", { + get: $util.oneOfGetter($oneOfFields = ["stringClassLabels", "int64ClassLabels"]), + set: $util.oneOfSetter($oneOfFields) + }); + + TreeEnsembleClassifier.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TreeEnsembleClassifier(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.treeEnsemble = $root.CoreML.Specification.TreeEnsembleParameters.decode(reader, reader.uint32()); + break; + case 2: + message.postEvaluationTransform = reader.int32(); + break; + case 100: + message.stringClassLabels = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 101: + message.int64ClassLabels = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TreeEnsembleClassifier; + })(); + + Specification.TreeEnsembleRegressor = (function() { + + function TreeEnsembleRegressor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TreeEnsembleRegressor.prototype.treeEnsemble = null; + TreeEnsembleRegressor.prototype.postEvaluationTransform = 0; + + TreeEnsembleRegressor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.TreeEnsembleRegressor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.treeEnsemble = $root.CoreML.Specification.TreeEnsembleParameters.decode(reader, reader.uint32()); + break; + case 2: + message.postEvaluationTransform = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return TreeEnsembleRegressor; + })(); + + Specification.ItemSimilarityRecommender = (function() { + + function ItemSimilarityRecommender(properties) { + this.itemItemSimilarities = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ItemSimilarityRecommender.prototype.itemItemSimilarities = $util.emptyArray; + ItemSimilarityRecommender.prototype.itemStringIds = null; + ItemSimilarityRecommender.prototype.itemInt64Ids = null; + ItemSimilarityRecommender.prototype.itemInputFeatureName = ""; + ItemSimilarityRecommender.prototype.numRecommendationsInputFeatureName = ""; + ItemSimilarityRecommender.prototype.itemRestrictionInputFeatureName = ""; + ItemSimilarityRecommender.prototype.itemExclusionInputFeatureName = ""; + ItemSimilarityRecommender.prototype.recommendedItemListOutputFeatureName = ""; + ItemSimilarityRecommender.prototype.recommendedItemScoreOutputFeatureName = ""; + + ItemSimilarityRecommender.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ItemSimilarityRecommender(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.itemItemSimilarities && message.itemItemSimilarities.length)) + message.itemItemSimilarities = []; + message.itemItemSimilarities.push($root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems.decode(reader, reader.uint32())); + break; + case 2: + message.itemStringIds = $root.CoreML.Specification.StringVector.decode(reader, reader.uint32()); + break; + case 3: + message.itemInt64Ids = $root.CoreML.Specification.Int64Vector.decode(reader, reader.uint32()); + break; + case 10: + message.itemInputFeatureName = reader.string(); + break; + case 11: + message.numRecommendationsInputFeatureName = reader.string(); + break; + case 12: + message.itemRestrictionInputFeatureName = reader.string(); + break; + case 13: + message.itemExclusionInputFeatureName = reader.string(); + break; + case 20: + message.recommendedItemListOutputFeatureName = reader.string(); + break; + case 21: + message.recommendedItemScoreOutputFeatureName = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ItemSimilarityRecommender.ConnectedItem = (function() { + + function ConnectedItem(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ConnectedItem.prototype.itemId = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + ConnectedItem.prototype.similarityScore = 0; + + ConnectedItem.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.itemId = reader.uint64(); + break; + case 2: + message.similarityScore = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ConnectedItem; + })(); + + ItemSimilarityRecommender.SimilarItems = (function() { + + function SimilarItems(properties) { + this.similarItemList = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SimilarItems.prototype.itemId = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + SimilarItems.prototype.similarItemList = $util.emptyArray; + SimilarItems.prototype.itemScoreAdjustment = 0; + + SimilarItems.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.ItemSimilarityRecommender.SimilarItems(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.itemId = reader.uint64(); + break; + case 2: + if (!(message.similarItemList && message.similarItemList.length)) + message.similarItemList = []; + message.similarItemList.push($root.CoreML.Specification.ItemSimilarityRecommender.ConnectedItem.decode(reader, reader.uint32())); + break; + case 3: + message.itemScoreAdjustment = reader.double(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return SimilarItems; + })(); + + return ItemSimilarityRecommender; + })(); + + Specification.LinkedModel = (function() { + + function LinkedModel(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LinkedModel.prototype.linkedModelFile = null; + + var $oneOfFields; + + Object.defineProperty(LinkedModel.prototype, "LinkType", { + get: $util.oneOfGetter($oneOfFields = ["linkedModelFile"]), + set: $util.oneOfSetter($oneOfFields) + }); + + LinkedModel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.CoreML.Specification.LinkedModel(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.linkedModelFile = $root.CoreML.Specification.LinkedModelFile.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LinkedModel; + })(); + + Specification.LinkedModelFile = (function() { + + function LinkedModelFile(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LinkedModelFile.prototype.linkedModelFileName = null; + LinkedModelFile.prototype.linkedModelSearchPath = null; + + LinkedModelFile.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.CoreML.Specification.LinkedModelFile(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.linkedModelFileName = $root.CoreML.Specification.StringParameter.decode(reader, reader.uint32()); + break; + case 2: + message.linkedModelSearchPath = $root.CoreML.Specification.StringParameter.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return LinkedModelFile; + })(); + + return Specification; + })(); + + return CoreML; + })(); + + return $root; +})(protobuf); diff --git a/frontend/packages/core/public/netron/coreml.js b/frontend/packages/core/public/netron/coreml.js new file mode 100644 index 00000000..d32fb151 --- /dev/null +++ b/frontend/packages/core/public/netron/coreml.js @@ -0,0 +1,1234 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var coreml = coreml || {}; +var base = base || require('./base'); +var long = 
long || { Long: require('long') }; +var protobuf = protobuf || require('protobufjs'); + +coreml.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + return extension == 'mlmodel'; + } + + open(context, host) { + return host.require('./coreml-proto').then(() => { + const identifier = context.identifier; + let decodedBuffer = null; + try { + coreml.proto = protobuf.roots.coreml.CoreML.Specification; + decodedBuffer = coreml.proto.Model.decode(context.buffer); + } + catch (error) { + throw new coreml.Error("File format is not coreml.Model (" + error.message + ") in '" + identifier + "'."); + } + return coreml.Metadata.open(host).then((metadata) => { + try { + return new coreml.Model(metadata, decodedBuffer); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new coreml.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } +}; + +coreml.Model = class { + + constructor(metadata, model) { + this._specificationVersion = model.specificationVersion; + this._graphs = [ new coreml.Graph(metadata, model) ]; + if (model.description && model.description.metadata) { + const properties = model.description.metadata; + if (properties.versionString) { + this._version = properties.versionString; + } + if (properties.author) { + this._author = properties.author; + } + if (properties.shortDescription) { + this._description = properties.shortDescription; + } + if (properties.license) { + this._license = properties.license; + } + if (metadata.userDefined && Object.keys(properties.userDefined).length > 0) { + /* empty */ + } + } + } + + get format() { + return 'Core ML v' + this._specificationVersion.toString(); + } + + get version() { + return this._version || null; + } + + get description() { + return this._description || null; + } + + get author() { + return this._author || null; + } + + get license() { + 
return this._license || null; + } + + get graphs() { + return this._graphs; + } +}; + +coreml.Graph = class { + + constructor(metadata, model) { + this._metadata = metadata; + this._description = model.description; + this._groups = false; + this._inputs = []; + this._outputs = []; + this._nodes = []; + + if (this._description) { + this._inputs = this._description.input.map((input) => { + const argument = new coreml.Argument(input.name, coreml.Graph._formatFeatureType(input.type), input.shortDescription, null); + return new coreml.Parameter(input.name, true, [ argument ]); + }); + + this._outputs = this._description.output.map((output) => { + const argument = new coreml.Argument(output.name, coreml.Graph._formatFeatureType(output.type), output.shortDescription, null); + return new coreml.Parameter(output.name, true, [ argument ]); + }); + } + + this._type = this._loadModel(model, {}, ''); + } + + get name() { + return ''; + } + + get type() { + return this._type; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + get groups() { + return this._groups; + } + + _updateOutput(name, newName) { + for (const node of this._nodes) { + for (const output of node.outputs) { + for (const argument of output.arguments) { + if (argument.name === name) { + argument.name = newName; + } + } + } + } + return newName; + } + + _updateClassifierOutput(group, classifier) { + let labelProbabilityLayerName = classifier.labelProbabilityLayerName; + if (!labelProbabilityLayerName && this._nodes.length > 0) { + const node = this._nodes.slice(-1).pop(); + if (node && node.outputs.length == 1 && node.outputs[0].arguments.length == 1) { + labelProbabilityLayerName = node.outputs[0].arguments[0].name; + } + } + let predictedFeatureName = this._description.predictedFeatureName; + let predictedProbabilitiesName = this._description.predictedProbabilitiesName; + if ((predictedFeatureName || 
predictedProbabilitiesName) && labelProbabilityLayerName && classifier.ClassLabels) { + predictedFeatureName = predictedFeatureName ? predictedFeatureName : '?'; + predictedProbabilitiesName = predictedProbabilitiesName ? predictedProbabilitiesName : '?'; + const labelProbabilityInput = this._updateOutput(labelProbabilityLayerName, labelProbabilityLayerName + ':labelProbabilityLayerName'); + const type = classifier.ClassLabels; + this._nodes.push(new coreml.Node(this._metadata, this._group, type, null, '', classifier[type], [ labelProbabilityInput ], [ predictedProbabilitiesName, predictedFeatureName ])); + } + } + + _updatePreprocessing(scope, group, preprocessing) { + if (preprocessing && preprocessing.length > 0) { + const preprocessingInput = this._description.input[0].name; + const inputNodes = []; + for (const node of this._nodes) { + if (node.inputs.some((input) => input.arguments.some((arg) => arg.name == preprocessingInput))) { + inputNodes.push(node); + } + } + let preprocessorOutput = preprocessingInput; + let preprocessorIndex = 0; + for (const p of preprocessing) { + const input = p.featureName ? p.featureName : preprocessorOutput; + preprocessorOutput = preprocessingInput + ':' + preprocessorIndex.toString(); + this._createNode(scope, group, p.preprocessor, null, '', p[p.preprocessor], [ input ], [ preprocessorOutput ]); + preprocessorIndex++; + } + for (const node of inputNodes) { + for (const input of node.inputs) { + for (const arg of input.arguments) { + if (arg.name === preprocessingInput) { + arg.name = preprocessorOutput; + } + } + } + } + } + } + + _loadModel(model, scope, group) { + this._groups = this._groups | (group.length > 0 ? true : false); + const description = model && model.description && model.description.metadata && model.description.metadata.shortDescription ? 
model.description.metadata.shortDescription : ''; + if (model.neuralNetworkClassifier) { + const neuralNetworkClassifier = model.neuralNetworkClassifier; + for (const layer of neuralNetworkClassifier.layers) { + this._createNode(scope, group, layer.layer, layer.name, description, layer[layer.layer], layer.input, layer.output); + } + this._updateClassifierOutput(group, neuralNetworkClassifier); + this._updatePreprocessing(scope, group, neuralNetworkClassifier.preprocessing); + return 'Neural Network Classifier'; + } + else if (model.neuralNetwork) { + const neuralNetwork = model.neuralNetwork; + for (const layer of neuralNetwork.layers) { + this._createNode(scope, group, layer.layer, layer.name, description, layer[layer.layer], layer.input, layer.output); + } + this._updatePreprocessing(scope, group, neuralNetwork.preprocessing); + return 'Neural Network'; + } + else if (model.neuralNetworkRegressor) { + const neuralNetworkRegressor = model.neuralNetworkRegressor; + for (const layer of neuralNetworkRegressor.layers) { + this._createNode(scope, group, layer.layer, layer.name, description, layer[layer.layer], layer.input, layer.output); + } + this._updatePreprocessing(scope, group, neuralNetworkRegressor); + return 'Neural Network Regressor'; + } + else if (model.pipeline) { + for (let i = 0; i < model.pipeline.models.length; i++) { + this._loadModel(model.pipeline.models[i], scope, (group ? (group + '/') : '') + 'pipeline[' + i.toString() + ']'); + } + return 'Pipeline'; + } + else if (model.pipelineClassifier) { + for (let i = 0; i < model.pipelineClassifier.pipeline.models.length; i++) { + this._loadModel(model.pipelineClassifier.pipeline.models[i], scope, (group ? (group + '/') : '') + 'pipelineClassifier[' + i.toString() + ']'); + } + return 'Pipeline Classifier'; + } + else if (model.pipelineRegressor) { + for (let i = 0; i < model.pipelineRegressor.pipeline.models.length; i++) { + this._loadModel(model.pipelineRegressor.pipeline.models[i], scope, (group ? 
(group + '/') : '') + 'pipelineRegressor[' + i.toString() + ']'); + } + return 'Pipeline Regressor'; + } + else if (model.glmClassifier) { + this._createNode(scope, group, 'glmClassifier', null, description, + { + classEncoding: model.glmClassifier.classEncoding, + offset: model.glmClassifier.offset, + weights: model.glmClassifier.weights + }, + [ model.description.input[0].name ], + [ model.description.predictedProbabilitiesName ]); + this._updateClassifierOutput(group, model.glmClassifier); + return 'Generalized Linear Classifier'; + } + else if (model.glmRegressor) { + this._createNode(scope, group, 'glmRegressor', null, description, + model.glmRegressor, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Generalized Linear Regressor'; + } + else if (model.dictVectorizer) { + this._createNode(scope, group, 'dictVectorizer', null, description, + model.dictVectorizer, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Dictionary Vectorizer'; + } + else if (model.featureVectorizer) { + this._createNode(scope, group, 'featureVectorizer', null, description, + model.featureVectorizer, + coreml.Graph._formatFeatureDescriptionList(model.description.input), + [ model.description.output[0].name ]); + return 'Feature Vectorizer'; + } + else if (model.treeEnsembleClassifier) { + this._createNode(scope, group, 'treeEnsembleClassifier', null, description, + model.treeEnsembleClassifier.treeEnsemble, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + this._updateClassifierOutput(group, model.treeEnsembleClassifier); + return 'Tree Ensemble Classifier'; + } + else if (model.treeEnsembleRegressor) { + this._createNode(scope, group, 'treeEnsembleRegressor', null, description, + model.treeEnsembleRegressor.treeEnsemble, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Tree Ensemble Regressor'; + } + else if 
(model.supportVectorClassifier) { + this._createNode(scope, group, 'supportVectorClassifier', null, description, + { + coefficients: model.supportVectorClassifier.coefficients, + denseSupportVectors: model.supportVectorClassifier.denseSupportVectors, + kernel: model.supportVectorClassifier.kernel, + numberOfSupportVectorsPerClass: model.supportVectorClassifier.numberOfSupportVectorsPerClass, + probA: model.supportVectorClassifier.probA, + probB: model.supportVectorClassifier.probB, + rho: model.supportVectorClassifier.rho, + supportVectors: model.supportVectorClassifier.supportVectors + }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + this._updateClassifierOutput(group, model.supportVectorClassifier); + return 'Support Vector Classifier'; + } + else if (model.supportVectorRegressor) { + this._createNode(scope, group, 'supportVectorRegressor', null, description, + { + coefficients: model.supportVectorRegressor.coefficients, + kernel: model.supportVectorRegressor.kernel, + rho: model.supportVectorRegressor.rho, + supportVectors: model.supportVectorRegressor.supportVectors + }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Support Vector Regressor'; + } + else if (model.arrayFeatureExtractor) { + this._createNode(scope, group, 'arrayFeatureExtractor', null, description, + { extractIndex: model.arrayFeatureExtractor.extractIndex }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Array Feature Extractor'; + } + else if (model.oneHotEncoder) { + const categoryType = model.oneHotEncoder.CategoryType; + const oneHotEncoderParams = { outputSparse: model.oneHotEncoder.outputSparse }; + oneHotEncoderParams[categoryType] = model.oneHotEncoder[categoryType]; + this._createNode(scope, group, 'oneHotEncoder', null, description, + oneHotEncoderParams, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'One Hot Encoder'; + } 
+ else if (model.imputer) { + const imputedValue = model.imputer.ImputedValue; + const replaceValue = model.imputer.ReplaceValue; + const imputerParams = {}; + imputerParams[imputedValue] = model.imputer[imputedValue]; + imputerParams[replaceValue] = model.imputer[replaceValue]; + this._createNode(scope, group, 'oneHotEncoder', null, description, + imputerParams, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Imputer'; + } + else if (model.normalizer) { + this._createNode(scope, group, 'normalizer', null, description, + model.normalizer, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Normalizer'; + } + else if (model.wordTagger) { + this._createNode(scope, group, 'wordTagger', null, description, + model.wordTagger, + [ model.description.input[0].name ], + [ + model.wordTagger.tokensOutputFeatureName, + model.wordTagger.tokenTagsOutputFeatureName, + model.wordTagger.tokenLocationsOutputFeatureName, + model.wordTagger.tokenLengthsOutputFeatureName + ]); + return 'Word Tagger'; + } + else if (model.textClassifier) { + this._createNode(scope, group, 'textClassifier', null, description, + model.textClassifier, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Text Classifier'; + } + else if (model.nonMaximumSuppression) { + const nonMaximumSuppressionParams = { + pickTop: model.nonMaximumSuppression.pickTop, + stringClassLabels: model.nonMaximumSuppression.stringClassLabels, + iouThreshold: model.nonMaximumSuppression.iouThreshold, + confidenceThreshold: model.nonMaximumSuppression.confidenceThreshold + }; + this._createNode(scope, group, 'nonMaximumSuppression', null, description, + nonMaximumSuppressionParams, + [ + model.nonMaximumSuppression.confidenceInputFeatureName, + model.nonMaximumSuppression.coordinatesInputFeatureName, + model.nonMaximumSuppression.iouThresholdInputFeatureName, + 
model.nonMaximumSuppression.confidenceThresholdInputFeatureName, + ], + [ + model.nonMaximumSuppression.confidenceOutputFeatureName, + model.nonMaximumSuppression.coordinatesOutputFeatureName + ]); + return 'Non Maximum Suppression'; + } + else if (model.visionFeaturePrint) { + const visionFeaturePrintParams = { + scene: model.visionFeaturePrint.scene + }; + this._createNode(scope, group, 'visionFeaturePrint', null, description, + visionFeaturePrintParams, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Vision Feature Print'; + } + else if (model.soundAnalysisPreprocessing) { + this._createNode(scope, group, 'soundAnalysisPreprocessing', null, description, + model.soundAnalysisPreprocessing, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'Sound Analysis Preprocessing'; + } + else if (model.kNearestNeighborsClassifier) { + this._createNode(scope, group, 'kNearestNeighborsClassifier', null, description, + model.kNearestNeighborsClassifier, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + this._updateClassifierOutput(group, model.kNearestNeighborsClassifier); + return 'kNearestNeighborsClassifier'; + } + else if (model.itemSimilarityRecommender) { + this._createNode(scope, group, 'itemSimilarityRecommender', null, description, + { + itemStringIds: model.itemSimilarityRecommender.itemStringIds.vector, + itemItemSimilarities: model.itemSimilarityRecommender.itemItemSimilarities + }, + model.description.input.map((feature) => feature.name), + model.description.output.map((feature) => feature.name)); + return 'itemSimilarityRecommender'; + } + else if (model.linkedModel) { + this._createNode(scope, group, 'linkedModel', null, description, + model.linkedModel.linkedModelFile, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'customModel'; + } + else if (model.customModel) { + this._createNode(scope, group, 
'customModel', null, description, + { className: model.customModel.className, parameters: model.customModel.parameters }, + [ model.description.input[0].name ], + [ model.description.output[0].name ]); + return 'customModel'; + } + throw new coreml.Error("Unknown model type '" + JSON.stringify(Object.keys(model)) + "'."); + } + + _createNode(scope, group, type, name, description, data, inputs, outputs) { + inputs = inputs.map((input) => scope[input] ? scope[input].argument : input); + outputs = outputs.map((output) => { + if (scope[output]) { + scope[output].counter++; + const next = output + '\n' + scope[output].counter.toString(); // custom argument id + scope[output].argument = next; + return next; + } + scope[output] = { + argument: output, + counter: 0 + }; + return output; + }); + + const node = new coreml.Node(this._metadata, group, type, name, description, data, inputs, outputs); + this._nodes.push(node); + return node; + } + + static _formatFeatureType(type) { + let result = '?'; + if (type) { + switch (type.Type) { + case 'multiArrayType': { + let shape = new coreml.TensorShape([]); + if (type.multiArrayType.shape && type.multiArrayType.shape.length > 0) { + shape = new coreml.TensorShape(type.multiArrayType.shape); + } + let dataType = '?'; + switch (type.multiArrayType.dataType) { + case coreml.proto.ArrayFeatureType.ArrayDataType.FLOAT32: + dataType = 'float32'; + break; + case coreml.proto.ArrayFeatureType.ArrayDataType.INT32: + dataType = 'int32'; + break; + case coreml.proto.ArrayFeatureType.ArrayDataType.DOUBLE: + dataType = 'float64'; + break; + } + result = new coreml.TensorType(dataType, shape); + break; + } + case 'stringType': { + result = new coreml.TensorType('string'); + break; + } + case 'doubleType': { + result = new coreml.TensorType('float64'); + break; + } + case 'int64Type': { + result = new coreml.TensorType('int64'); + break; + } + case 'dictionaryType': { + result = new coreml.MapType(type.dictionaryType.KeyType.replace('KeyType', 
''), 'float64'); + break; + } + case 'imageType': { + result = new coreml.ImageType(type.imageType.colorSpace, type.imageType.width, type.imageType.height); + break; + } + } + if (type.isOptional) { + result = new coreml.OptionalType(result); + } + } + return result; + } + + static _formatFeatureDescriptionList(list) { + return list.map((item) => item.name); + } +}; + +coreml.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +coreml.Argument = class { + + constructor(name, type, description, initializer) { + if (typeof name !== 'string') { + throw new coreml.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type; + this._description = description || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + set name(value) { + this._name = value; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get description() { + return this._description; + } + + get quantization() { + if (this._initializer) { + return this._initializer.quantization; + } + return null; + } + + get initializer() { + return this._initializer; + } +}; + +coreml.Node = class { + + constructor(metadata, group, type, name, description, data, inputs, outputs) { + this._metadata = metadata; + if (group) { + this._group = group; + } + this._type = type; + this._name = name || ''; + this._description = description || ''; + this._attributes = []; + const initializers = []; + if (data) { + const initializerMap = this._initialize(data, initializers); + for (const key of Object.keys(data)) { + if (!initializerMap[key]) { + const schema = metadata.attribute(this.type, key); + this._attributes.push(new 
coreml.Attribute(schema, key, data[key])); + } + } + } + this._inputs = this._metadata.getInputs(this._type, inputs).map((input) => { + return new coreml.Parameter(input.name, true, input.arguments.map((argument) => { + return new coreml.Argument(argument.name, argument.type, null, null); + })); + }); + this._inputs = this._inputs.concat(initializers); + this._outputs = outputs.map((output, index) => { + const name = this._metadata.getOutputName(this._type, index); + return new coreml.Parameter(name, true, [ new coreml.Argument(output, null, null, null) ]); + }); + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get description() { + return this._description; + } + + get metadata() { + return this._metadata.type(this.type); + } + + get group() { + return this._group ? this._group : null; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + _initialize(data, initializers) { + switch (this._type) { + case 'convolution': { + const weightsShape = [ data.outputChannels, data.kernelChannels, data.kernelSize[0], data.kernelSize[1] ]; + if (data.isDeconvolution) { + weightsShape[0] = data.kernelChannels; + weightsShape[1] = Math.floor(data.outputChannels / (data.nGroups != 0 ? 
data.nGroups : 1)); + } + this._initializer(initializers, 'Weights', 'weights', weightsShape, data.weights); + if (data.hasBias) { + this._initializer(initializers, 'Weights', 'bias', [ data.outputChannels ], data.bias); + } + return { 'weights': true, 'bias': data.hasBias }; + } + case 'innerProduct': + this._initializer(initializers, 'Weights', 'weights', [ data.outputChannels, data.inputChannels ], data.weights); + if (data.hasBias) { + this._initializer(initializers, 'Weights', 'bias', [ data.outputChannels ], data.bias); + } + return { 'weights': true, 'bias': data.hasBias }; + case 'batchnorm': + this._initializer(initializers, 'Weights', 'gamma', [ data.channels ], data.gamma); + this._initializer(initializers, 'Weights', 'beta', [ data.channels ], data.beta); + if (data.mean) { + this._initializer(initializers, 'Weights', 'mean', [ data.channels ], data.mean); + } + if (data.variance) { + this._initializer(initializers, 'Weights', 'variance', [ data.channels ], data.variance); + } + return { 'gamma': true, 'beta': true, 'mean': true, 'variance': true }; + case 'embedding': + this._initializer(initializers, 'Weights', 'weights', [ data.inputDim, data.outputChannels ], data.weights); + return { 'weights': true }; + case 'loadConstant': + this._initializer(initializers, 'Weights', 'data', data.shape, data.data); + return { 'data': true }; + case 'scale': + this._initializer(initializers, 'Weights', 'scale', data.shapeScale, data.scale); + if (data.hasBias) { + this._initializer(initializers, 'Weights', 'bias', data.shapeBias, data.bias); + } + return { 'scale': true, 'bias': data.hasBias }; + case 'bias': + this._initializer(initializers, 'Weights', 'bias', data.shape, data.bias); + return { 'bias': true }; + case 'simpleRecurrent': + this._initializer(initializers, 'Weights', 'weights', [ data.outputVectorSize, data.inputVectorSize ], data.weightMatrix); + this._initializer(initializers, 'Weights', 'recurrent', [ data.outputVectorSize, data.inputVectorSize ], 
data.recursionMatrix); + if (data.hasBiasVectors) { + this._initializer(initializers, 'Weights', 'bias', [ data.outputVectorSize ], data.biasVector); + } + return { 'weightMatrix': true, 'recursionMatrix': true, 'biasVector': data.hasBiasVectors }; + case 'gru': { + const recursionMatrixShape = [ data.outputVectorSize, data.outputVectorSize ]; + const weightMatrixShape = [ data.outputVectorSize, data.inputVectorSize ]; + const biasVectorShape = [ data.outputVectorSize ]; + this._initializer(initializers, 'Weights', 'updateGateWeightMatrix', weightMatrixShape, data.updateGateWeightMatrix); + this._initializer(initializers, 'Weights', 'resetGateWeightMatrix', weightMatrixShape, data.resetGateWeightMatrix); + this._initializer(initializers, 'Weights', 'outputGateWeightMatrix', weightMatrixShape, data.outputGateWeightMatrix); + this._initializer(initializers, 'Weights', 'updateGateRecursionMatrix', recursionMatrixShape, data.updateGateRecursionMatrix); + this._initializer(initializers, 'Weights', 'resetGateRecursionMatrix', recursionMatrixShape, data.resetGateRecursionMatrix); + this._initializer(initializers, 'Weights', 'outputGateRecursionMatrix', recursionMatrixShape, data.outputGateRecursionMatrix); + if (data.hasBiasVectors) { + this._initializer(initializers, 'Weights', 'updateGateBiasVector', biasVectorShape, data.updateGateBiasVector); + this._initializer(initializers, 'Weights', 'resetGateBiasVector', biasVectorShape, data.resetGateBiasVector); + this._initializer(initializers, 'Weights', 'outputGateBiasVector', biasVectorShape, data.outputGateBiasVector); + } + return { + 'updateGateWeightMatrix': true, 'resetGateWeightMatrix': true, 'outputGateWeightMatrix': true, + 'updateGateRecursionMatrix': true, 'resetGateRecursionMatrix': true, 'outputGateRecursionMatrix': true, + 'updateGateBiasVector': data.hasBiasVectors, 'resetGateBiasVector': data.hasBiasVectors, 'outputGateBiasVector': data.hasBiasVectors + }; + } + case 'uniDirectionalLSTM': + case 
'biDirectionalLSTM': { + const count = (this._type == 'uniDirectionalLSTM') ? 1 : 2; + const matrixShape = [ data.outputVectorSize, data.inputVectorSize ]; + const vectorShape = [ data.outputVectorSize ]; + for (let i = 0; i < count; i++) { + const weights = count == 1 ? data.weightParams : data.weightParams[i]; + const suffix = (i == 0) ? '' : '_rev'; + this._initializer(initializers, 'Weights', 'inputGateWeightMatrix' + suffix, matrixShape, weights.inputGateWeightMatrix); + this._initializer(initializers, 'Weights', 'forgetGateWeightMatrix' + suffix, matrixShape, weights.forgetGateWeightMatrix); + this._initializer(initializers, 'Weights', 'blockInputWeightMatrix' + suffix, matrixShape, weights.blockInputWeightMatrix); + this._initializer(initializers, 'Weights', 'outputGateWeightMatrix' + suffix, matrixShape, weights.outputGateWeightMatrix); + this._initializer(initializers, 'Weights', 'inputGateRecursionMatrix' + suffix, matrixShape, weights.inputGateRecursionMatrix); + this._initializer(initializers, 'Weights', 'forgetGateRecursionMatrix' + suffix, matrixShape,weights.forgetGateRecursionMatrix); + this._initializer(initializers, 'Weights', 'blockInputRecursionMatrix' + suffix, matrixShape, weights.blockInputRecursionMatrix); + this._initializer(initializers, 'Weights', 'outputGateRecursionMatrix' + suffix, matrixShape, weights.outputGateRecursionMatrix); + if (data.params.hasBiasVectors) { + this._initializer(initializers, 'Weights', 'inputGateBiasVector' + suffix, vectorShape, weights.inputGateBiasVector); + this._initializer(initializers, 'Weights', 'forgetGateBiasVector' + suffix, vectorShape, weights.forgetGateBiasVector); + this._initializer(initializers, 'Weights', 'blockInputBiasVector' + suffix, vectorShape, weights.blockInputBiasVector); + this._initializer(initializers, 'Weights', 'outputGateBiasVector' + suffix, vectorShape, weights.outputGateBiasVector); + } + if (data.params.hasPeepholeVectors) { + this._initializer(initializers, 'Weights', 
'inputGatePeepholeVector' + suffix, vectorShape, weights.inputGatePeepholeVector); + this._initializer(initializers, 'Weights', 'forgetGatePeepholeVector' + suffix, vectorShape, weights.forgetGatePeepholeVector); + this._initializer(initializers, 'Weights', 'outputGatePeepholeVector' + suffix, vectorShape, weights.outputGatePeepholeVector); + } + } + return { 'weightParams': true }; + } + case 'dictVectorizer': + data.stringToIndex = this._convertVector(data.stringToIndex); + return {}; + case 'wordTagger': + data.modelParameterData = Array.from(data.modelParameterData); + data.stringTags = this._convertVector(data.stringTags); + return { tokensOutputFeatureName: true, tokenTagsOutputFeatureName: true, tokenLengthsOutputFeatureName: true, tokenLocationsOutputFeatureName: true }; + case 'textClassifier': + data.modelParameterData = Array.from(data.modelParameterData); + data.stringClassLabels = this._convertVector(data.stringClassLabels); + return {}; + case 'nonMaximumSuppression': + data.stringClassLabels = this._convertVector(data.stringClassLabels); + return {}; + } + return {}; + } + + _convertVector(value) { + if (value && Object.keys(value).length == 1 && value.vector) { + return value.vector; + } + return value; + } + + _initializer(initializers, kind, name, shape, data) { + const initializer = new coreml.Tensor(kind, name, shape, data); + const argument = new coreml.Argument('', null, null, initializer); + let visible = true; + const schema = this._metadata.getInputSchema(this._type, name); + if (schema && Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + visible = false; + } + initializers.push(new coreml.Parameter(name, visible, [ argument ])); + } +}; + +coreml.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + if (schema) { + if (schema.type) { + this._type = schema.type; + } + if (this._type && coreml.proto) { + let type = coreml.proto; + const parts = 
this._type.split('.'); + while (type && parts.length > 0) { + type = type[parts.shift()]; + } + if (type && type[this._value]) { + this._value = type[this.value]; + } + } + + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (Array.isArray(value)) { + value = value.map((item) => { + if (item && long.Long.isLong(item)) { + return item.toNumber(); + } + return item; + }); + } + if (JSON.stringify(schema.default) == JSON.stringify(value)) { + this._visible = false; + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +coreml.Tensor = class { + + constructor(kind, name, shape, data) { + this._kind = kind; + this._name = name; + this._data = null; + let dataType = '?'; + if (data) { + if (data.floatValue && data.floatValue.length > 0) { + this._data = data.floatValue; + dataType = 'float32'; + } + else if (data.float16Value && data.float16Value.length > 0) { + this._data = data.float16Value; // byte[] + dataType = 'float16'; + } + else if (data.rawValue && data.rawValue.length > 0) { + if (data.quantization) { + this._data = data.rawValue; + dataType = 'uint' + data.quantization.numberOfBits.toString(); + } + else { + shape = []; + } + } + this._quantization = data.quantization || null; + } + this._type = new coreml.TensorType(dataType, new coreml.TensorShape(shape)); + } + + get name() { + return this._name; + } + + get kind() { + return this._kind; + } + + get type() { + return this._type; + } + + get quantization() { + if (this._quantization) { + if (this._quantization.lookupTableQuantization && + this._quantization.lookupTableQuantization.floatValue && + this._quantization.lookupTableQuantization.floatValue.length > 0) { + const map = []; + for (const key of 
Object.keys(this._quantization.lookupTableQuantization.floatValue)) { + map.push(key.toString() + ' = ' + this._quantization.lookupTableQuantization.floatValue[key].toString()); + } + return map.join('; '); + } + return '?'; + } + return null; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.state = null; + context.index = 0; + context.count = 0; + context.dataType = this._type.dataType; + context.dimensions = this._type.shape.dimensions; + + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + + switch (context.dataType) { + case 'float32': + context.data = this._data; + break; + case 'float16': + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + break; + default: + if (this._quantization) { + context.dataType = 'quantization'; + context.bits = long.Long.isLong(this._quantization.numberOfBits) ? 
this._quantization.numberOfBits.toNumber() : this._quantization.numberOfBits; + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + } + else { + context.state = 'Tensor data type is not implemented.'; + } + break; + } + + return context; + } + + _decode(context, dimension) { + const results = []; + const size = context.dimensions[dimension]; + if (dimension == context.dimensions.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (context.dataType) { + case 'float32': + results.push(this._data[context.index]); + context.index++; + break; + case 'float16': + results.push(context.data.getFloat16(context.index, true)); + context.index += 2; + break; + case 'quantization': + results.push(context.data.getBits(context.index, context.bits)); + context.index++; + break; + + } + context.count++; + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + return results; + } +}; + +coreml.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape || new coreml.TensorShape([]); + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +coreml.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']'; + } +}; + +coreml.MapType = class { + + constructor(keyType, valueType) { + this._keyType = keyType; + this._valueType = valueType; + } + + get keyType() { + return this._keyType; + } + + 
get valueType() { + return this._valueType; + } + + toString() { + return 'map<' + this._keyType + ',' + this._valueType.toString() + '>'; + } +}; + +coreml.ImageType = class { + + constructor(colorSpace, width, height) { + this._colorSpace = '?'; + switch (colorSpace) { + case coreml.proto.ImageFeatureType.ColorSpace.GRAYSCALE: + this._colorSpace = 'Grayscale'; + break; + case coreml.proto.ImageFeatureType.ColorSpace.RGB: + this._colorSpace = 'RGB'; + break; + case coreml.proto.ImageFeatureType.ColorSpace.BGR: + this._colorSpace = 'BGR'; + break; + } + this._width = width; + this._height = height; + } + + toString() { + return 'image<' + this._colorSpace + ',' + this._width. toString() + 'x' + this._height.toString() + '>'; + } +}; + +coreml.OptionalType = class { + + constructor(type) { + this._type = type; + } + + toString() { + return this._type.toString() + '?'; + } +}; + +coreml.Metadata = class { + + static open(host) { + if (coreml.Metadata._metadata) { + return Promise.resolve(coreml.Metadata._metadata); + } + return host.request(null, 'coreml-metadata.json', 'utf-8').then((data) => { + coreml.Metadata._metadata = new coreml.Metadata(data); + return coreml.Metadata._metadata; + }).catch(() => { + coreml.Metadata._metadata = new coreml.Metadata(null); + return coreml.Metadata._metadata; + }); + } + + constructor(data) { + this._map = new Map(); + this._attributeCache = new Map(); + this._inputCache = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map.set(item.name, item.schema); + } + } + } + } + } + + type(name) { + return this._map.get(name); + } + + attribute(type, name) { + const key = type + ':' + name; + if (!this._attributeCache.has(key)) { + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + this._attributeCache.set(type + ':' + 
attribute.name, attribute); + } + } + if (!this._attributeCache.has(key)) { + this._attributeCache.set(key, null); + } + } + return this._attributeCache.get(key); + } + + getInputSchema(type, name) { + let map = this._inputCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.inputs && schema.inputs.length > 0) { + for (const input of schema.inputs) { + map[input.name] = input; + } + } + this._inputCache[type] = map; + } + return map[name] || null; + } + + getInputs(type, inputs) { + let results = []; + const schema = this._map[type]; + let index = 0; + while (index < inputs.length) { + let result = { arguments: [] }; + let count = 1; + let name = null; + if (schema && schema.inputs) { + if (index < schema.inputs.length) { + let input = schema.inputs[index]; + name = input.name; + if (schema.inputs[index].option == 'variadic') { + count = inputs.length - index; + } + } + } + else { + if (index == 0) { + name = 'input'; + } + } + result.name = name ? 
name : '(' + index.toString() + ')'; + let array = inputs.slice(index, index + count); + for (let j = 0; j < array.length; j++) { + result.arguments.push({ name: array[j] }); + } + index += count; + results.push(result); + } + return results; + } + + getOutputName(type, index) { + const schema = this._map[type]; + if (schema) { + let outputs = schema.outputs; + if (outputs && index < outputs.length) { + let output = outputs[index]; + if (output) { + let name = output.name; + if (name) { + return name; + } + } + } + } + if (index == 0) { + return 'output'; + } + return '(' + index.toString() + ')'; + } +}; + +coreml.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading Core ML model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = coreml.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/darknet-metadata.json b/frontend/packages/core/public/netron/darknet-metadata.json new file mode 100644 index 00000000..f195424c --- /dev/null +++ b/frontend/packages/core/public/netron/darknet-metadata.json @@ -0,0 +1,561 @@ +[ + { + "name": "convolutional", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "filters", "type": "int32", "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "stride_x", "type": "int32", "default": -1 }, + { "name": "stride_y", "type": "int32", "default": -1 }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "dilation", "default": 1 }, + { "name": "share_index", "default": -1000000000 }, + { "name": "binary", "type": "int32", "default": 0 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "bin_output", "type": "int32", "default": 0 }, + { "name": 
"flipped", "type": "int32", "default": 0 }, + { "name": "dot", "type": "float32", "default": 0 }, + { "name": "batch_normalize", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" } + ] + } + }, + { + "name": "deconvolutional", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "filters", "type": "int32", "visible": false, "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "batch_normalize", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" } + ] + } + }, + { + "name": "local", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "filters", "type": "int32", "visible": false, "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" } + ] + } + }, + { + "name": "dropout", + "schema": { + "category": "Dropout", + "attributes": [ + { "name": "probability", "type": "float32", "default": 0.5 }, + { "name": "dropblock", "type": "int32", "default": 0 }, + { "name": "dropblock_size_rel", "type": "float32", "default": 0 }, + { "name": "dropblock_size_abs ", "type": "int32", "default": 7 } + ] + } + }, + { + 
"name": "maxpool", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "stride_x", "type": "int32", "default": 1 }, + { "name": "stride_y", "type": "int32", "default": 1 }, + { "name": "size", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "maxpool_depth", "type": "int32", "default": 0 }, + { "name": "out_channels", "default": 1 }, + { "name": "antialiasing", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "avgpool", + "schema": { + "category": "Pool" + } + }, + { + "name": "connected", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false, "default": 1 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "softmax", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "temperature", "type": "float32", "default": 1 }, + { "name": "tree", "type": "string", "default": 0 }, + { "name": "spatial", "type": "int32", "default": 0 }, + { "name": "noloss", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "gru", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false, "default": 1 }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "lstm", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false, "default": 1 }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "conv_lstm", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "batch_normalize", "type": "int32", 
"default": 0 }, + { "name": "activation", "type": "string", "default": "linear", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "size", "type": "int32", "default": 3 }, + { "name": "stride", "type": "int32", "default": 1 }, + { "name": "dilation", "default": 1 }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "padding", "type": "int32", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "shortcut", "default": 0 }, + { "name": "output", "type": "int32", "default": 1 }, + { "name": "state_constrain", "type": "int32", "default": 16 }, + { "name": "peephole", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "crnn", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "batch_normalize", "type": "int32", "default": 0 }, + { "name": "activation", "type": "string", "default": "logistic" }, + { "name": "dilation", "default": 1 }, + { "name": "padding", "default": 0 }, + { "name": "pad", "type": "int32", "default": 0 }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "shortcut", "type": "int32", "default": 0 }, + { "name": "output_filters", "default": 1 }, + { "name": "hidden_filters", "default": 1 } + ] + } + }, + { + "name": "rnn", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "output", "type": "int32", "visible": false }, + { "name": "hidden", "visible": false, "default": 1 }, + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "groups", "type": "int32", "default": 1 }, + { "name": "xnor", "type": "int32", "default": 0 }, + { "name": "shortcut", "default": 0 }, + { "name": "logistic", 
"default": 0 }, + { "name": "batch_normalize", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "crop", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "crop_height", "type": "int32", "default": 1 }, + { "name": "crop_width", "type": "int32", "default": 1 }, + { "name": "flip", "type": "int32", "default": 0 }, + { "name": "exposure", "type": "float32", "default": 1 }, + { "name": "saturation", "type": "float32", "default": 1 }, + { "name": "angle", "type": "float32", "default": 0 }, + { "name": "noadjust", "default": 0 }, + { "name": "shift", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "reorg", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "stride", "default": 1 }, + { "name": "reverse", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "shortcut", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "activation", "type": "string", "default": "logistic", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU, SWISH, MISH" }, + { "name": "assisted_excitation", "default": 0 }, + { "name": "from", "description": "This params link the layer to another one, the index of the layer is either positive in which case it's a direct address, if negative it's relative to the layer position" } + ] + } + }, + { + "name": "scale_channels", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "activation", "type": "string", "default": "linear", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU" }, + { "name": "scale_wh", "default": 0 }, + { "name": "from", "description": "This params link the layer to another one, the index of the layer is either positive in which case it's a direct address, if negative it's relative to the layer position" } + ] + } + }, + { + "name": "sam", + "schema": { + "category": "Tensor", + "attributes": [ + { 
"name": "activation", "type": "string", "default": "linear", "description": "options are: LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN, SELU" }, + { "name": "from", "description": "This params link the layer to another one, the index of the layer is either positive in which case it's a direct address, if negative it's relative to the layer position" } + ] + } + }, + { + "name": "batchnorm", + "schema": { + "category": "Normalization" + } + }, + { + "name": "normalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": "beta", "type": "float32", "default": 0.75 }, + { "name": "kappa", "type": "float32", "default": 1 }, + { "name": "size", "default": 5 } + ] + } + }, + { + "name": "cost", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "type", "type": "string", "default": "sse" }, + { "name": "scale", "type": "float32", "default": 1 }, + { "name": "ratio", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "route", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "groups_id", "type": "int32", "default": 0 }, + { "name": "groups", "type": "int32", "default": 1 } + ] + } + }, + { + "name": "logistic", + "schema": { + "category": "Activation" + } + }, + { + "name": "relu", + "schema": { + "category": "Activation" + } + }, + { + "name": "relie", + "schema": { + "category": "Activation" + } + }, + { + "name": "linear", + "schema": { + "category": "Activation" + } + }, + { + "name": "ramp", + "schema": { + "category": "Activation" + } + }, + { + "name": "tanh", + "schema": { + "category": "Activation" + } + }, + { + "name": "plse", + "schema": { + "category": "Activation" + } + }, + { + "name": "leaky", + "schema": { + "category": "Activation" + } + }, + { + "name": "elu", + "schema": { + "category": "Activation" + } + }, + { + "name": "loggy", + "schema": { + "category": "Activation" + } 
+ }, + { + "name": "stair", + "schema": { + "category": "Activation" + } + }, + { + "name": "hardtan", + "schema": { + "category": "Activation" + } + }, + { + "name": "lhtan", + "schema": { + "category": "Activation" + } + }, + { + "name": "selu", + "schema": { + "category": "Activation" + } + }, + { + "name": "swish", + "schema": { + "category": "Activation" + } + }, + { + "name": "mish", + "schema": { + "category": "Activation" + } + }, + { + "name": "norm_chan", + "schema": { + "category": "Activation" + } + }, + { + "name": "norm_chan_softmax", + "schema": { + "category": "Activation" + } + }, + { + "name": "upsample", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "stride", "type": "int32", "default": 2 }, + { "name": "scale", "type": "float32", "default": 1 } + ] + } + }, + { + "name": "region", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 20 }, + { "name": "coord", "default": 4 }, + { "name": "num", "default": 1 }, + { "name": "mask", "type": "string", "default": 0 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "classfix", "type": "int32", "default": 0 }, + { "name": "coord_scale", "type": "float32", "default": 1 }, + { "name": "object_scale", "type": "float32", "default": 1 }, + { "name": "noobject_scale", "type": "float32", "default": 1 }, + { "name": "mask_scale", "type": "float32", "default": 1 }, + { "name": "class_scale", "type": "float32", "default": 1 }, + { "name": "bias_match", "type": "int32", "default": 0 }, + { "name": "focal_loss", "type": "int32", "default": 0 }, + { "name": "max", "type": "int32", "default": 90 }, + { "name": "softmax", "type": "int32", "default": 0 }, + { "name": "rescore", "type": "int32", "default": 0 }, + { "name": "thresh", "type": "float32", "default": 0.5 }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "map", "type": "string", "default": 0 }, + { "name": "tree", "type": "string", "default": 
0 }, + { "name": "anchors", "type": "string", "default": 0 }, + { "name": "absolute", "default": 0 }, + { "name": "log", "default": 0 }, + { "name": "sqrt", "default": 0 } + ] + } + }, + { + "name": "detection", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 1 }, + { "name": "coord", "type": "int32", "default": 1 }, + { "name": "num", "type": "int32", "default": 1 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "coord_scale", "type": "float32", "default": 1 }, + { "name": "object_scale", "type": "float32", "default": 1 }, + { "name": "noobject_scale", "type": "float32", "default": 1 }, + { "name": "class_scale", "type": "float32", "default": 1 }, + { "name": "forced", "type": "int32", "default": 0 }, + { "name": "side", "type": "int32", "default": 7 }, + { "name": "softmax", "type": "int32", "default": 0 }, + { "name": "sqrt", "type": "int32", "default": 0 }, + { "name": "max", "type": "int32", "default": 30 }, + { "name": "rescore", "type": "int32", "default": 0 }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "reorg", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "yolo", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 20 }, + { "name": "num", "type": "int32", "default": 1 }, + { "name": "mask", "type": "int32[]", "default": 0 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "label_smooth_eps", "type": "float32", "default": 0 }, + { "name": "scale_x_y", "type": "float32", "default": 1 }, + { "name": "iou_normalizer", "type": "float32", "default": 0.75 }, + { "name": "cls_normalizer", "type": "float32", "default": 1 }, + { "name": "iou_loss", "type": "string", "default": "mse", "description": "options are: mse, giou, diou, and ciou"}, + { "name": "focal_loss", "type": "int32", "default": 0 }, + { "name": "max", "type": "int32", "default": 90 }, + { "name": 
"ignore_thresh", "type": "float32", "default": 0.5 }, + { "name": "truth_thresh", "type": "float32", "default": 1 }, + { "name": "iou_thresh", "type": "float32", "default": 1 , "description": "recommended to use iou_thresh=0.213" }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "map", "type": "string", "default": 0 }, + { "name": "nms_kind", "type": "string", "default": "default", "description": "options are: greedynms, diounms, cornersnms, or defaultnms" }, + { "name": "anchors", "type": "int32[]", "default": 0 } + ] + } + }, + { + "name": "gaussian_yolo", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "classes", "type": "int32", "default": 20 }, + { "name": "num", "type": "int32", "default": 1 }, + { "name": "mask", "type": "string", "default": 0 }, + { "name": "jitter", "type": "float32", "default": 0.2 }, + { "name": "label_smooth_eps", "type": "float32", "default": 0 }, + { "name": "scale_x_y", "type": "float32", "default": 1 }, + { "name": "uc_normalizer", "type": "float32", "default": 1 }, + { "name": "iou_normalizer", "type": "float32", "default": 0.75 }, + { "name": "cls_normalizer", "type": "float32", "default": 1 }, + { "name": "iou_loss", "type": "string", "default": "mse", "description": "options are: mse, giou, diou, and ciou" }, + { "name": "max", "default": 90 }, + { "name": "ignore_thresh", "type": "float32", "default": 0.5 }, + { "name": "truth_thresh", "default": 1 }, + { "name": "iou_thresh", "type": "float32", "default": 1 , "description": "recommended to use iou_thresh=0.213" }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "map", "type": "string", "default": 0 }, + { "name": "beta_nms", "type": "float32", "default": 0.6 }, + { "name": "nms_kind", "type": "string", "default": "default", "description": "options are: greedynms, diounms, cornersnms, or defaultnms"}, + { "name": "anchors", "type": "string", "default": 0 }, + { "name": "yolo_point", "type": "string", "default": "center", 
"description": "options are: center, left_top, and right_bottom" } + ] + } + }, + { + "name": "net", + "schema": { + "attributes": [ + { "name": "batch", "type": "int32", "default": 1 }, + { "name": "max_batches", "type": "int32", "default": 0, "description": "Limits the maximum number of iterations" }, + { "name": "learning_rate", "type": "float32", "default": 0.001 }, + { "name": "momentum", "type": "float32", "default": 0.9 }, + { "name": "decay", "type": "float32", "default": 0.0001 }, + { "name": "subdivisions", "type": "int32", "default": 1, "description": "In concert with batch property, this greatly affect memory usage, minimal working number is recommended" }, + { "name": "time_steps", "type": "int32", "default": 1 }, + { "name": "notruth", "type": "int32", "default": 0 }, + { "name": "random", "type": "int32", "default": 0 }, + { "name": "adam", "type": "int32", "default": 0 }, + { "name": "B1", "type": "float32", "default": 0.9 }, + { "name": "B2", "type": "float32", "default": 0.999 }, + { "name": "eps", "type": "float32", "default": 0.0000001 }, + { "name": "height", "type": "int32", "default": 0 }, + { "name": "width", "type": "int32", "default": 0 }, + { "name": "channels", "type": "int32", "default": 0 }, + { "name": "inputs", "type": "int32" }, + { "name": "max_crop", "type": "int32" }, + { "name": "min_crop", "type": "int32" }, + { "name": "max_ratio", "type": "float32" }, + { "name": "min_ratio", "type": "float32" }, + { "name": "center", "type": "int32", "default": 0 }, + { "name": "clip", "type": "int32", "default": 0 }, + { "name": "angle", "type": "float32", "default": 0 }, + { "name": "aspect", "type": "float32", "default": 1 }, + { "name": "saturation", "type": "float32", "default": 1 }, + { "name": "exposure", "type": "float32", "default": 1 }, + { "name": "hue", "type": "float32", "default": 0 }, + { "name": "power", "type": "float32", "default": 4 }, + { "name": "flip", "type": "int32", "default": 1, "description": "Enables augmentation 
method: horizontal flip"}, + { "name": "blur", "type": "int32", "default": 0, "description": "Enables augmentation method: backgound blurring" }, + { "name": "mixup", "type": "int32", "default": 0, "description": "Enables augmentation method: images mixup"}, + { "name": "cutmix", "type": "int32", "default": 0, "description": "Enables augmentation method: images cutmix" }, + { "name": "mosaic", "type": "int32", "default": 0, "description": "Enables augmentation method: images mosaicing" }, + { "name": "letter_box", "type": "int32", "default": 0, "description": "Enables letter-box resizing (keeping the aspect ratio)" }, + { "name": "policy", "type": "string", "default": "constant" }, + { "name": "burn_in", "type": "int32", "default": 0, "description": "Is used for MAP calculation: permit a minimal number of iteration before first MAP check" }, + { "name": "letter_box", "type": "int32", "default": 0 }, + { "name": "optimized_memory", "type": "int32", "default": 0 , "description": "can offload memory from GPU into CPU at the cost of speed, 3 options are possible please look at: https://github.com/AlexeyAB/darknet/issues/4386"}, + { "name": "workspace_size_limit_MB", "type": "float32", "default": 1024 } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/darknet.js b/frontend/packages/core/public/netron/darknet.js new file mode 100644 index 00000000..3b8eb0ba --- /dev/null +++ b/frontend/packages/core/public/netron/darknet.js @@ -0,0 +1,1096 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var darknet = darknet || {}; +var base = base || require('./base'); +var long = long || { Long: require('long') }; + +darknet.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (extension == 'cfg' || extension == 'model') { + const text = context.text; + if (text.substring(0, Math.min(text.length, 1024)).indexOf('[net]') !== -1) { + return true; + } + } + 
return false; + } + + open(context, host) { + return darknet.Metadata.open(host).then((metadata) => { + const identifier = context.identifier; + const parts = identifier.split('.'); + parts.pop(); + const basename = parts.join('.'); + return context.request(basename + '.weights', null).then((weights) => { + return this._openModel(metadata, identifier, context.text, weights); + }).catch(() => { + return this._openModel(metadata, identifier, context.text, null); + }); + }); + } + _openModel( metadata, identifier, cfg, weights) { + try { + return new darknet.Model(metadata, cfg, weights ? new darknet.Weights(weights) : null); + } + catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new darknet.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + } +}; + +darknet.Model = class { + + constructor(metadata, cfg, weights) { + this._graphs = []; + this._graphs.push(new darknet.Graph(metadata, cfg, weights)); + } + + get format() { + return 'Darknet'; + } + + get graphs() { + return this._graphs; + } +}; + +darknet.Graph = class { + + constructor(metadata, cfg, weights) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + + // read_cfg + const sections = []; + let section = null; + const lines = cfg.split('\n'); + let lineNumber = 0; + while (lines.length > 0) { + lineNumber++; + const text = lines.shift(); + const line = text.replace(/\s/g, ''); + if (line.length > 0) { + switch (line[0]) { + case '#': + case ';': + break; + case '[': { + section = {}; + section.line = lineNumber; + section.type = line[line.length - 1] === ']' ? 
line.substring(1, line.length - 1) : line.substring(1); + section.options = {}; + sections.push(section); + break; + } + default: { + if (!section || line[0] < 0x20 || line[0] > 0x7E) { + throw new darknet.Error("Invalid cfg '" + text.replace(/[^\x20-\x7E]+/g, '').trim() + "' at line " + lineNumber.toString() + "."); + } + if (section) { + const index = line.indexOf('='); + if (index < 0) { + throw new darknet.Error("Invalid cfg '" + text.replace(/[^\x20-\x7E]+/g, '').trim() + "' at line " + lineNumber.toString() + "."); + } + const key = line.substring(0, index); + const value = line.substring(index + 1); + section.options[key] = value; + } + break; + } + } + } + } + + const option_find_int = (options, key, defaultValue) => { + let value = options[key]; + if (typeof value === 'string' && value.startsWith('$')) { + const key = value.substring(1); + value = globals.has(key) ? globals.get(key) : value; + } + if (value !== undefined) { + const number = parseInt(value, 10); + if (!Number.isInteger(number)) { + throw new darknet.Error("Invalid int option '" + JSON.stringify(options[key]) + "'."); + } + return number; + } + return defaultValue; + }; + + const option_find_str = (options, key, defaultValue) => { + const value = options[key]; + return value !== undefined ? value : defaultValue; + }; + + const make_shape = (dimensions, source) => { + if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) { + throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "' in '" + source + "'."); + } + return new darknet.TensorShape(dimensions); + }; + + const load_weights = (name, shape, visible) => { + const data = weights ? 
weights.bytes(4 * shape.reduce((a, b) => a * b)) : null; + const type = new darknet.TensorType('float32', make_shape(shape, 'load_weights')); + const initializer = new darknet.Tensor(type, data); + const argument = new darknet.Argument('', null, initializer); + return new darknet.Parameter(name, visible === false ? false : true, [ argument ]); + }; + + const load_batch_normalize_weights = (layer, prefix, size) => { + layer.weights.push(load_weights(prefix + 'scale', [ size ], prefix === '')); + layer.weights.push(load_weights(prefix + 'mean', [ size ], prefix === '')); + layer.weights.push(load_weights(prefix + 'variance', [ size ], prefix === '')); + }; + + const make_convolutional_layer = (layer, prefix, w, h, c, n, groups, size, stride_x, stride_y, padding, batch_normalize) => { + layer.out_w = Math.floor((w + 2 * padding - size) / stride_x) + 1; + layer.out_h = Math.floor((h + 2 * padding - size) / stride_y) + 1; + layer.out_c = n; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.weights.push(load_weights(prefix + 'biases', [ n ], prefix === '')); + if (batch_normalize) { + load_batch_normalize_weights(layer, prefix, n); + } + layer.weights.push(load_weights(prefix + 'weights', [ Math.floor(c / groups), n, size, size ], prefix === '')); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'make_convolutional_layer')); + }; + + const make_connected_layer = (layer, prefix, inputs, outputs, batch_normalize) => { + layer.out_h = 1; + layer.out_w = 1; + layer.out_c = outputs; + layer.out = outputs; + layer.weights.push(load_weights(prefix + 'biases', [ outputs ], prefix === '')); + if (batch_normalize) { + load_batch_normalize_weights(layer, prefix, outputs); + } + layer.weights.push(load_weights(prefix + 'weights', [ inputs, outputs ], prefix === '')); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'make_connected_layer')); + }; + + const params = {}; + 
const globals = new Map(); + const net = sections.shift(); + switch (net.type) { + case 'net': + case 'network': { + params.h = option_find_int(net.options, 'height', 0); + params.w = option_find_int(net.options, 'width', 0); + params.c = option_find_int(net.options, 'channels', 0); + params.inputs = option_find_int(net.options, 'inputs', params.h * params.w * params.c); + for (const key of Object.keys(net.options)) { + globals.set(key, net.options[key]); + } + break; + } + } + + const inputType = params.w && params.h && params.c ? + new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'params-if')) : + new darknet.TensorType('float32', make_shape([ params.inputs ], 'params-else')); + const inputName = 'input'; + params.arguments = [ new darknet.Argument(inputName, inputType, null) ]; + this._inputs.push(new darknet.Parameter(inputName, true, params.arguments)); + + if (sections.length === 0) { + throw new darknet.Error('Config file has no sections.'); + } + + let infer = true; + for (let i = 0; i < sections.length; i++) { + const section = sections[i]; + section.name = i.toString(); + section.chain = []; + section.layer = {}; + const options = section.options; + const layer = section.layer; + layer.inputs = [].concat(params.arguments); + layer.outputs = [ new darknet.Argument(i.toString(), null, null) ]; + layer.weights = []; + switch (section.type) { + case 'shortcut': { + const from = options.from ? options.from.split(',').map((item) => Number.parseInt(item.trim(), 10)) : []; + for (let index of from) { + index = (index < 0) ? i + index : index; + const item = sections[index].layer; + if (item) { + layer.inputs.push(item.outputs[0]); + } + } + delete options.from; + break; + } + case 'sam': + case 'scale_channels': { + let index = option_find_int(options, 'from', 0); + index = (index < 0) ? 
i + index : index; + const item = sections[index].layer; + if (item) { + layer.inputs.push(item.outputs[0]); + } + delete options.from; + break; + } + case 'route': { + layer.inputs = []; + layer.layers = []; + const routes = options.layers ? options.layers.split(',').map((route) => Number.parseInt(route.trim(), 10)) : []; + for (let j = 0; j < routes.length; j++) { + const index = (routes[j] < 0) ? i + routes[j] : routes[j]; + const route = sections[index].layer; + if (route) { + layer.inputs.push(route.outputs[0]); + layer.layers.push(route); + } + } + delete options.layers; + break; + } + } + if (infer) { + switch (section.type) { + case 'conv': + case 'convolutional': + case 'deconvolutional': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before convolutional layer must output image.'); + } + const size = option_find_int(options, 'size', 1); + const n = option_find_int(options, 'filters', 1); + const pad = option_find_int(options, 'pad', 0); + const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0); + let stride_x = option_find_int(options, 'stride_x', -1); + let stride_y = option_find_int(options, 'stride_y', -1); + if (stride_x < 1 || stride_y < 1) { + const stride = option_find_int(options, 'stride', 1); + stride_x = stride_x < 1 ? stride : stride_x; + stride_y = stride_y < 1 ? 
stride : stride_y; + } + const groups = option_find_int(options, 'groups', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + const activation = option_find_str(options, 'activation', 'logistic'); + make_convolutional_layer(layer, '', params.w, params.h, params.c, n, groups, size, stride_x, stride_y, padding, batch_normalize); + if (activation !== 'logistic') { + section.chain.push({ type: activation }); + } + break; + } + case 'connected': { + const outputs = option_find_int(options, 'output', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + const activation = option_find_str(options, 'activation', 'logistic'); + make_connected_layer(layer, '', params.inputs, outputs, batch_normalize); + if (activation !== 'logistic') { + section.chain.push({ type: activation }); + } + break; + } + case 'local': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before avgpool layer must output image.'); + } + const n = option_find_int(options, 'filters' , 1); + const size = option_find_int(options, 'size', 1); + const stride = option_find_int(options, 'stride', 1); + const pad = option_find_int(options, 'pad', 0); + const activation = option_find_str(options, 'activation', 'logistic'); + layer.out_h = Math.floor((params.h - (pad ? 1 : size)) / stride) + 1; + layer.out_w = Math.floor((params.w - (pad ? 
1 : size)) / stride) + 1; + layer.out_c = n; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.weights.push(load_weights('weights', [ params.c, n, size, size, layer.out_h * layer.out_w ])); + layer.weights.push(load_weights('biases',[ layer.out_w * layer.out_h * layer.out_c ])); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'local')); + if (activation !== 'logistic') { + section.chain.push({ type: activation }); + } + break; + } + case 'batchnorm': { + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = params.c; + layer.out = layer.in; + load_batch_normalize_weights(weights, section, '', layer.out); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.ouputs ], 'batchnorm')); + break; + } + case 'activation': { + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = params.c; + layer.out = layer.in; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.ouputs ], 'activation')); + break; + } + case 'max': + case 'maxpool': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before maxpool layer must output image.'); + } + const antialiasing = option_find_int(options, 'antialiasing', 0); + const stride = option_find_int(options, 'stride', 1); + const blur_stride_x = option_find_int(options, 'stride_x', stride); + const blur_stride_y = option_find_int(options, 'stride_y', stride); + const stride_x = antialiasing ? 1 : blur_stride_x; + const stride_y = antialiasing ? 
1 : blur_stride_y; + const size = option_find_int(options, 'size', stride); + const padding = option_find_int(options, 'padding', size - 1); + const out_channels = option_find_int(options, 'out_channels', 1); + const maxpool_depth = option_find_int(options, 'maxpool_depth', 0); + if (maxpool_depth) { + layer.out_c = out_channels; + layer.out_w = params.w; + layer.out_h = params.h; + } + else { + layer.out_w = Math.floor((params.w + padding - size) / stride_x) + 1; + layer.out_h = Math.floor((params.h + padding - size) / stride_y) + 1; + layer.out_c = params.c; + } + if (antialiasing) { + const blur_size = antialiasing === 2 ? 2 : 3; + const blur_pad = antialiasing === 2 ? 0 : Math.floor(blur_size / 3); + layer.input_layer = { weights: [], outputs: layer.outputs }; + make_convolutional_layer(layer.input_layer, '', layer.out_h, layer.out_w, layer.out_c, layer.out_c, layer.out_c, blur_size, blur_stride_x, blur_stride_y, blur_pad, 0); + layer.out_w = layer.input_layer.out_w; + layer.out_h = layer.input_layer.out_h; + layer.out_c = layer.input_layer.out_c; + } + else { + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'maxpool')); + } + layer.out = layer.out_w * layer.out_h * layer.out_c; + break; + } + case 'avgpool': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before avgpool layer must output image.'); + } + layer.out_w = 1; + layer.out_h = 1; + layer.out_c = params.c; + layer.out = layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'avgpool')); + break; + } + case 'crnn': { + const size = option_find_int(options, 'size', 3); + const stride = option_find_int(options, 'stride', 1); + const output_filters = option_find_int(options, 'output', 1); + const hidden_filters = option_find_int(options, 'hidden', 1); 
+ const groups = option_find_int(options, 'groups', 1); + const pad = option_find_int(options, 'pad', 0); + const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_convolutional_layer(layer.input_layer, 'input_', params.h, params.w, params.c, hidden_filters, groups, size, stride, stride, padding, batch_normalize); + layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_convolutional_layer(layer.self_layer, 'self_', params.h, params.w, hidden_filters, hidden_filters, groups, size, stride, stride, padding, batch_normalize); + layer.output_layer = { weights: [], outputs: layer.outputs }; + make_convolutional_layer(layer.output_layer, 'output_', params.h, params.w, hidden_filters, output_filters, groups, size, stride, stride, padding, batch_normalize); + layer.weights = layer.weights.concat(layer.input_layer.weights); + layer.weights = layer.weights.concat(layer.self_layer.weights); + layer.weights = layer.weights.concat(layer.output_layer.weights); + layer.out_h = layer.output_layer.out_h; + layer.out_w = layer.output_layer.out_w; + layer.out_c = output_filters; + layer.out = layer.output_layer.out; + break; + } + case 'rnn': { + const outputs = option_find_int(options, 'output', 1); + const hidden = option_find_int(options, 'hidden', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + const inputs = params.inputs; + layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.input_layer, 'input_', inputs, hidden, batch_normalize); + layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.self_layer, 'self_', hidden, hidden, batch_normalize); + layer.output_layer = { weights: [], outputs: 
layer.outputs }; + make_connected_layer(layer.output_layer, 'output_', hidden, outputs, batch_normalize); + layer.weights = layer.weights.concat(layer.input_layer.weights); + layer.weights = layer.weights.concat(layer.self_layer.weights); + layer.weights = layer.weights.concat(layer.output_layer.weights); + layer.out_w = 1; + layer.out_h = 1; + layer.out_c = outputs; + layer.out = outputs; + break; + } + case 'gru': { + const inputs = params.inputs; + const outputs = option_find_int(options, 'output', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + layer.input_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.input_z_layer, 'input_z', inputs, outputs, batch_normalize); + layer.state_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.state_z_layer, 'state_z', outputs, outputs, batch_normalize); + layer.input_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.input_r_layer, 'input_r', inputs, outputs, batch_normalize); + layer.state_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.state_r_layer, 'state_r', outputs, outputs, batch_normalize); + layer.input_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.input_h_layer, 'input_h', inputs, outputs, batch_normalize); + layer.state_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.state_h_layer, 'state_h', outputs, outputs, batch_normalize); + layer.weights = layer.weights.concat(layer.input_z_layer.weights); + layer.weights = layer.weights.concat(layer.state_z_layer.weights); + layer.weights = layer.weights.concat(layer.input_r_layer.weights); + layer.weights = layer.weights.concat(layer.state_r_layer.weights); + layer.weights = 
layer.weights.concat(layer.input_h_layer.weights); + layer.weights = layer.weights.concat(layer.state_h_layer.weights); + layer.out = outputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'gru')); + break; + } + case 'lstm': { + const inputs = params.inputs; + const outputs = option_find_int(options, 'output', 1); + const batch_normalize = option_find_int(options, 'batch_normalize', 0); + layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.uf, 'uf_', inputs, outputs, batch_normalize); + layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.ui, 'ui_', inputs, outputs, batch_normalize); + layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.ug, 'ug_', inputs, outputs, batch_normalize); + layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.uo, 'uo_', inputs, outputs, batch_normalize); + layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.wf, 'wf_', outputs, outputs, batch_normalize); + layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.wi, 'wi_', outputs, outputs, batch_normalize); + layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.wg, 'wg_', outputs, outputs, batch_normalize); + layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] }; + make_connected_layer(layer.wo, 'wo_', outputs, outputs, batch_normalize); + layer.weights = layer.weights.concat(layer.uf.weights); + layer.weights = layer.weights.concat(layer.ui.weights); + layer.weights = layer.weights.concat(layer.ug.weights); + layer.weights = layer.weights.concat(layer.uo.weights); + layer.weights = layer.weights.concat(layer.wf.weights); + layer.weights = 
layer.weights.concat(layer.wi.weights); + layer.weights = layer.weights.concat(layer.wg.weights); + layer.weights = layer.weights.concat(layer.wo.weights); + layer.out_w = 1; + layer.out_h = 1; + layer.out_c = outputs; + layer.out = outputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'lstm')); + weights = null; + break; + } + case 'softmax': { + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'softmax')); + break; + } + case 'dropout': { + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'dropout')); + break; + } + case 'upsample': { + const stride = option_find_int(options, 'stride', 2); + layer.out_w = params.w * stride; + layer.out_h = params.h * stride; + layer.out_c = params.c; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'upsample')); + break; + } + case 'crop': { + const shape = layer.inputs[0].type.shape.dimensions; + if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) { + throw new darknet.Error('Layer before crop layer must output image.'); + } + const crop_height = option_find_int(options, 'crop_height', 1); + const crop_width = option_find_int(options, 'crop_width', 1); + layer.out_w = crop_width; + layer.out_h = crop_height; + layer.out_c = params.c; + layer.out = layer.out_w * layer.out_h * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'crop')); + break; + } + case 'yolo': { + const classes = option_find_int(options, 'classes', 20); + const n = option_find_int(options, 'num', 
1); + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = n * (classes + 4 + 1); + layer.out = layer.out_h * layer.out_w * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'yolo')); + break; + } + case 'Gaussian_yolo': { + const classes = option_find_int(options, 'classes', 20); + const n = option_find_int(options, 'num', 1); + layer.out_h = params.h; + layer.out_w = params.w; + layer.out_c = n * (classes + 8 + 1); + layer.out = layer.out_h * layer.out_w * layer.out_c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'Gaussian_yolo')); + break; + } + case 'region': { + const coords = option_find_int(options, 'coords', 4); + const classes = option_find_int(options, 'classes', 20); + const num = option_find_int(options, 'num', 1); + layer.out = params.h * params.w * num * (classes + coords + 1); + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.h, params.w, num, (classes + coords + 1) ], 'region')); + break; + } + case 'cost': { + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'cost')); + break; + } + case 'reorg': { + const stride = option_find_int(options, 'stride', 1); + const reverse = option_find_int(options, 'reverse', 0); + const extra = option_find_int(options, 'extra', 0); + if (reverse) { + layer.out_w = params.w * stride; + layer.out_h = params.h * stride; + layer.out_c = Math.floor(params.c / (stride * stride)); + } + else { + layer.out_w = Math.floor(params.w / stride); + layer.out_h = Math.floor(params.h / stride); + layer.out_c = params.c * (stride * stride); + } + layer.out = layer.out_h * layer.out_w * layer.out_c; + if (extra) { + layer.out_w = 0; + layer.out_h = 0; + layer.out_c = 0; + layer.out = (params.h * params.w * params.c) + extra; + } + layer.outputs[0].type = new 
darknet.TensorType('float32', make_shape([ layer.out ], 'reorg')); + break; + } + case 'route': { + const layers = [].concat(layer.layers); + layer.out = 0; + for (const next of layers) { + layer.out += next.out; + } + const first = layers.shift(); + layer.out_w = first.out_w; + layer.out_h = first.out_h; + layer.out_c = first.out_c; + while (layers.length > 0) { + const next = layers.shift(); + if (next.out_w === first.out_w && next.out_h === first.out_h) { + layer.out_c += next.out_c; + } + else { + layer.out_h = 0; + layer.out_w = 0; + layer.out_c = 0; + } + } + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'route')); + break; + } + case 'shortcut': + case 'scale_channels': + case 'sam': { + const activation = option_find_str(options, 'activation', 'linear'); + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.w * params.h * params.c; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'shortcut|scale_channels|sam')); + if (activation !== 'linear') { + section.chain.push({ type: activation }); + } + break; + } + case 'detection': { + layer.out_w = params.w; + layer.out_h = params.h; + layer.out_c = params.c; + layer.out = params.inputs; + layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'detection')); + break; + } + default: { + infer = false; + break; + } + } + params.h = layer.out_h; + params.w = layer.out_w; + params.c = layer.out_c; + params.inputs = layer.out; + params.last = section; + } + params.arguments = layer.outputs; + } + + for (let i = 0; i < sections.length; i++) { + this._nodes.push(new darknet.Node(metadata, net, sections[i])); + } + + /* if (sections.length > 0) { + const last = sections[sections.length - 1].layer; + for (let i = 0; i < last.outputs.length; i++) { + const outputName = 'output' + (i > 1 ? 
i.toString() : ''); + this._outputs.push(new darknet.Parameter(outputName, true, [ last.outputs[i] ])); + } + } */ + + if (weights) { + weights.validate(); + } + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +darknet.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +darknet.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new darknet.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type; + this._initializer = initializer; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + set type(value) { + if (this._type) { + throw new darknet.Error('Invalid argument type set operation.'); + } + this._type = value; + } + + get initializer() { + return this._initializer; + } +}; + +darknet.Node = class { + + constructor(metadata, net, section) { + this._name = section.name || ''; + this._location = section.line !== undefined ? section.line.toString() : undefined; + this._metadata = metadata; + this._type = section.type; + this._attributes = []; + this._inputs = []; + this._outputs = []; + this._chain = []; + const layer = section.layer; + if (layer && layer.inputs && layer.inputs.length > 0) { + this._inputs.push(new darknet.Parameter(layer.inputs.length <= 1 ? 
'input' : 'inputs', true, layer.inputs)); + } + if (layer && layer.weights && layer.weights.length > 0) { + this._inputs = this._inputs.concat(layer.weights); + } + if (layer && layer.outputs && layer.outputs.length > 0) { + this._outputs.push(new darknet.Parameter(layer.outputs.length <= 1 ? 'output' : 'outputs', true, layer.outputs)); + } + if (section.chain) { + for (const chain of section.chain) { + this._chain.push(new darknet.Node(metadata, net, chain, '')); + } + } + const options = section.options; + if (options) { + for (const key of Object.keys(options)) { + this._attributes.push(new darknet.Attribute(metadata.attribute(this._type, key), key, options[key])); + } + } + } + + get name() { + return this._name; + } + + get location() { + return this._location; + } + + get type() { + return this._type; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } +}; + +darknet.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + if (schema) { + this._type = schema.type || ''; + switch (this._type) { + case 'int32': { + const number = parseInt(this._value, 10); + if (Number.isInteger(number)) { + this._value = number; + } + break; + } + case 'float32': { + const number = parseFloat(this._value); + if (!isNaN(number)) { + this._value = number; + } + break; + } + case 'int32[]': { + const numbers = this._value.split(',').map((item) => parseInt(item.trim(), 10)); + if (numbers.every((number) => Number.isInteger(number))) { + this._value = numbers; + } + break; + } + } + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (this._value == schema.default) { + this._visible = false; + } 
+ } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +darknet.Tensor = class { + + constructor(type, data) { + this._type = type; + this._data = data; + } + + get kind() { + return 'Tensor'; + } + + get name() { + return ''; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + context.state = null; + context.position = 0; + context.count = 0; + context.dataView = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + context.dimensions = this.type.shape.dimensions; + return context; + } + + _decode(context, dimension) { + const results = []; + const size = context.dimensions[dimension]; + if (dimension == context.dimensions.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(context.dataView.getFloat32(context.position, true)); + context.position += 4; + context.count++; + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + return results; + } +}; + +darknet.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + 
return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return (this._dataType || '?') + this._shape.toString(); + } +}; + +darknet.TensorShape = class { + + constructor(dimensions) { + if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) { + throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "'."); + } + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (this._dimensions) { + if (this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']'; + } + return ''; + } +}; + +darknet.Weights = class { + + constructor(buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + const major = this.int32(); + const minor = this.int32(); + const revision = this.int32(); + this._seen = ((major * 10 + minor) >= 2) ? this.int64() : this.int32(); + const transpose = (major > 1000) || (minor > 1000); + if (transpose) { + throw new darknet.Error("Unsupported transpose weights file version '" + [ major, minor, revision ].join('.') + "'."); + } + } + + int32() { + const position = this._position; + this.skip(4); + return this._dataView.getInt32(position, true); + } + + int64() { + const hi = this.int32(); + const lo = this.int32(); + return new long.Long(hi, lo, true).toNumber(); + } + + bytes(length) { + const position = this._position; + this.skip(length); + return this._buffer.subarray(position, this._position); + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new darknet.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. 
Unexpected end of file.'); + } + } + + validate() { + if (this._position !== this._buffer.length) { + throw new darknet.Error('Invalid weights size.'); + } + } +}; + +darknet.Metadata = class { + + static open(host) { + if (darknet.Metadata._metadata) { + return Promise.resolve(darknet.Metadata._metadata); + } + return host.request(null, 'darknet-metadata.json', 'utf-8').then((data) => { + darknet.Metadata._metadata = new darknet.Metadata(data); + return darknet.Metadata._metadata; + }).catch(() => { + darknet.Metadata._metadata = new darknet.Metadata(null); + return darknet.Metadata._metadata; + }); + } + + constructor(data) { + this._map = new Map(); + this._attributeMap = new Map(); + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item && item.name && item.schema) { + if (this._map.has(item.name)) { + throw new darknet.Error("Duplicate metadata key '" + item.name + "'."); + } + item.schema.name = item.name; + this._map.set(item.name, item.schema); + } + } + } + } + } + + type(name) { + return this._map.get(name) || null; + } + + attribute(type, name) { + const key = type + ':' + name; + if (!this._attributeMap.has(key)) { + this._attributeMap.set(key, null); + const schema = this.type(type); + if (schema && schema.attributes) { + for (const attribute of schema.attributes) { + this._attributeMap.set(type + ':' + attribute.name, attribute); + } + } + } + return this._attributeMap.get(key); + } +}; + +darknet.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading Darknet model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = darknet.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/deps/d3.min.js b/frontend/packages/core/public/netron/deps/d3.min.js new file mode 100644 index 00000000..344d26cc --- /dev/null +++ b/frontend/packages/core/public/netron/deps/d3.min.js @@ -0,0 +1,2 @@ 
+// https://d3js.org v5.16.0 Copyright 2020 Mike Bostock +!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t=t||self).d3=t.d3||{})}(this,function(t){"use strict";function n(t,n){return tn?1:t>=n?0:NaN}function e(t){var e;return 1===t.length&&(e=t,t=function(t,r){return n(e(t),r)}),{left:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)<0?r=o+1:i=o}return r},right:function(n,e,r,i){for(null==r&&(r=0),null==i&&(i=n.length);r>>1;t(n[o],e)>0?i=o:r=o+1}return r}}}var r=e(n),i=r.right,o=r.left;function a(t,n){return[t,n]}function u(t){return null===t?NaN:+t}function c(t,n){var e,r,i=t.length,o=0,a=-1,c=0,f=0;if(null==n)for(;++a1)return f/(o-1)}function f(t,n){var e=c(t,n);return e?Math.sqrt(e):e}function s(t,n){var e,r,i,o=t.length,a=-1;if(null==n){for(;++a=e)for(r=i=e;++ae&&(r=e),i=e)for(r=i=e;++ae&&(r=e),i0)return[t];if((r=n0)for(t=Math.ceil(t/a),n=Math.floor(n/a),o=new Array(i=Math.ceil(n-t+1));++u=0?(o>=y?10:o>=_?5:o>=b?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=y?10:o>=_?5:o>=b?2:1)}function w(t,n,e){var r=Math.abs(n-t)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),o=r/i;return o>=y?i*=10:o>=_?i*=5:o>=b&&(i*=2),n=1)return+e(t[r-1],r-1,t);var r,i=(r-1)*n,o=Math.floor(i),a=+e(t[o],o,t);return a+(+e(t[o+1],o+1,t)-a)*(i-o)}}function T(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++or&&(r=e)}else for(;++o=e)for(r=e;++or&&(r=e);return r}function A(t){for(var n,e,r,i=t.length,o=-1,a=0;++o=0;)for(n=(r=t[i]).length;--n>=0;)e[--a]=r[n];return e}function S(t,n){var e,r,i=t.length,o=-1;if(null==n){for(;++o=e)for(r=e;++oe&&(r=e)}else for(;++o=e)for(r=e;++oe&&(r=e);return r}function k(t){if(!(i=t.length))return[];for(var n=-1,e=S(t,E),r=new Array(e);++n=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}})}function X(t,n){for(var e,r=0,i=t.length;r0)for(var e,r,i=new 
Array(e),o=0;o=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),$.hasOwnProperty(n)?{space:$[n],local:t}:t}function Z(t){var n=W(t);return(n.local?function(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}:function(t){return function(){var n=this.ownerDocument,e=this.namespaceURI;return e===G&&n.documentElement.namespaceURI===G?n.createElement(t):n.createElementNS(e,t)}})(n)}function Q(){}function K(t){return null==t?Q:function(){return this.querySelector(t)}}function J(){return[]}function tt(t){return null==t?J:function(){return this.querySelectorAll(t)}}function nt(t){return function(){return this.matches(t)}}function et(t){return new Array(t.length)}function rt(t,n){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=n}rt.prototype={constructor:rt,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var it="$";function ot(t,n,e,r,i,o){for(var a,u=0,c=n.length,f=o.length;un?1:t>=n?0:NaN}function ct(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function ft(t,n){return t.style.getPropertyValue(n)||ct(t).getComputedStyle(t,null).getPropertyValue(n)}function st(t){return t.trim().split(/^|\s+/)}function lt(t){return t.classList||new ht(t)}function ht(t){this._node=t,this._names=st(t.getAttribute("class")||"")}function dt(t,n){for(var e=lt(t),r=-1,i=n.length;++r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var Mt={};(t.event=null,"undefined"!=typeof document)&&("onmouseenter"in document.documentElement||(Mt={mouseenter:"mouseover",mouseleave:"mouseout"}));function Nt(t,n,e){return t=Tt(t,n,e),function(n){var 
e=n.relatedTarget;e&&(e===this||8&e.compareDocumentPosition(this))||t.call(this,n)}}function Tt(n,e,r){return function(i){var o=t.event;t.event=i;try{n.call(this,this.__data__,e,r)}finally{t.event=o}}}function At(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=m&&(m=b+1);!(_=g[m])&&++m=0;)(r=i[o])&&(a&&4^r.compareDocumentPosition(a)&&a.parentNode.insertBefore(r,a),a=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=ut);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?function(t){return function(){this.style.removeProperty(t)}}:"function"==typeof n?function(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}:function(t,n,e){return function(){this.style.setProperty(t,n,e)}})(t,n,null==e?"":e)):ft(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?function(t){return function(){delete this[t]}}:"function"==typeof n?function(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}:function(t,n){return function(){this[t]=n}})(t,n)):this.node()[t]},classed:function(t,n){var e=st(t+"");if(arguments.length<2){for(var r=lt(this.node()),i=-1,o=e.length;++i=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}})}(t+""),a=o.length;if(!(arguments.length<2)){for(u=n?St:At,null==e&&(e=!1),r=0;r>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1):8===e?gn(n>>24&255,n>>16&255,n>>8&255,(255&n)/255):4===e?gn(n>>12&15|n>>8&240,n>>8&15|n>>4&240,n>>4&15|240&n,((15&n)<<4|15&n)/255):null):(n=on.exec(t))?new bn(n[1],n[2],n[3],1):(n=an.exec(t))?new bn(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=un.exec(t))?gn(n[1],n[2],n[3],n[4]):(n=cn.exec(t))?gn(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=fn.exec(t))?Mn(n[1],n[2]/100,n[3]/100,1):(n=sn.exec(t))?Mn(n[1],n[2]/100,n[3]/100,n[4]):ln.hasOwnProperty(t)?vn(ln[t]):"transparent"===t?new bn(NaN,NaN,NaN,0):null}function 
vn(t){return new bn(t>>16&255,t>>8&255,255&t,1)}function gn(t,n,e,r){return r<=0&&(t=n=e=NaN),new bn(t,n,e,r)}function yn(t){return t instanceof Jt||(t=pn(t)),t?new bn((t=t.rgb()).r,t.g,t.b,t.opacity):new bn}function _n(t,n,e,r){return 1===arguments.length?yn(t):new bn(t,n,e,null==r?1:r)}function bn(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function mn(){return"#"+wn(this.r)+wn(this.g)+wn(this.b)}function xn(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function wn(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Mn(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new An(t,n,e,r)}function Nn(t){if(t instanceof An)return new An(t.h,t.s,t.l,t.opacity);if(t instanceof Jt||(t=pn(t)),!t)return new An;if(t instanceof An)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),a=NaN,u=o-i,c=(o+i)/2;return u?(a=n===o?(e-r)/u+6*(e0&&c<1?0:a,new An(a,u,c,t.opacity)}function Tn(t,n,e,r){return 1===arguments.length?Nn(t):new An(t,n,e,null==r?1:r)}function An(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function Sn(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}Qt(Jt,pn,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:hn,formatHex:hn,formatHsl:function(){return Nn(this).formatHsl()},formatRgb:dn,toString:dn}),Qt(bn,_n,Kt(Jt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new bn(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new bn(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return 
this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:mn,formatHex:mn,formatRgb:xn,toString:xn})),Qt(An,Tn,Kt(Jt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new An(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new An(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),n=isNaN(t)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*n,i=2*e-r;return new bn(Sn(t>=240?t-240:t+120,i,r),Sn(t,i,r),Sn(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));var kn=Math.PI/180,En=180/Math.PI,Cn=.96422,Pn=1,zn=.82521,Rn=4/29,Dn=6/29,qn=3*Dn*Dn,Ln=Dn*Dn*Dn;function Un(t){if(t instanceof Bn)return new Bn(t.l,t.a,t.b,t.opacity);if(t instanceof Vn)return Gn(t);t instanceof bn||(t=yn(t));var n,e,r=Hn(t.r),i=Hn(t.g),o=Hn(t.b),a=Fn((.2225045*r+.7168786*i+.0606169*o)/Pn);return r===i&&i===o?n=e=a:(n=Fn((.4360747*r+.3850649*i+.1430804*o)/Cn),e=Fn((.0139322*r+.0971045*i+.7141733*o)/zn)),new Bn(116*a-16,500*(n-a),200*(a-e),t.opacity)}function On(t,n,e,r){return 1===arguments.length?Un(t):new Bn(t,n,e,null==r?1:r)}function Bn(t,n,e,r){this.l=+t,this.a=+n,this.b=+e,this.opacity=+r}function Fn(t){return t>Ln?Math.pow(t,1/3):t/qn+Rn}function Yn(t){return t>Dn?t*t*t:qn*(t-Rn)}function In(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function Hn(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function jn(t){if(t instanceof Vn)return new Vn(t.h,t.c,t.l,t.opacity);if(t instanceof Bn||(t=Un(t)),0===t.a&&0===t.b)return new 
Vn(NaN,0=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],a=r>0?t[r-1]:2*i-o,u=r180||e<-180?e-360*Math.round(e/360):e):ue(isNaN(t)?n:t)}function se(t){return 1==(t=+t)?le:function(n,e){return e-n?function(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}(n,e,t):ue(isNaN(n)?e:n)}}function le(t,n){var e=n-t;return e?ce(t,e):ue(isNaN(t)?n:t)}Qt(re,ee,Kt(Jt,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new re(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new re(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=isNaN(this.h)?0:(this.h+120)*kn,n=+this.l,e=isNaN(this.s)?0:this.s*n*(1-n),r=Math.cos(t),i=Math.sin(t);return new bn(255*(n+e*($n*r+Wn*i)),255*(n+e*(Zn*r+Qn*i)),255*(n+e*(Kn*r)),this.opacity)}}));var he=function t(n){var e=se(n);function r(t,n){var r=e((t=_n(t)).r,(n=_n(n)).r),i=e(t.g,n.g),o=e(t.b,n.b),a=le(t.opacity,n.opacity);return function(n){return t.r=r(n),t.g=i(n),t.b=o(n),t.opacity=a(n),t+""}}return r.gamma=t,r}(1);function de(t){return function(n){var e,r,i=n.length,o=new Array(i),a=new Array(i),u=new Array(i);for(e=0;eo&&(i=n.slice(o,i),u[a]?u[a]+=i:u[++a]=i),(e=e[0])===(r=r[0])?u[a]?u[a]+=r:u[++a]=r:(u[++a]=null,c.push({i:a,x:me(e,r)})),o=Me.lastIndex;return o180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:me(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}(o.rotate,a.rotate,u,c),function(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:me(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}(o.skewX,a.skewX,u,c),function(t,n,e,r,o,a){if(t!==e||n!==r){var u=o.push(i(o)+"scale(",null,",",null,")");a.push({i:u-4,x:me(t,e)},{i:u-2,x:me(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}(o.scaleX,o.scaleY,a.scaleX,a.scaleY,u,c),o=a=null,function(t){for(var n,e=-1,r=c.length;++e=0&&n._call.call(null,t),n=n._next;--tr}function pr(){or=(ir=ur.now())+ar,tr=nr=0;try{dr()}finally{tr=0,function(){var 
t,n,e=Ke,r=1/0;for(;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:Ke=n);Je=t,gr(r)}(),or=0}}function vr(){var t=ur.now(),n=t-ir;n>rr&&(ar-=n,ir=t)}function gr(t){tr||(nr&&(nr=clearTimeout(nr)),t-or>24?(t<1/0&&(nr=setTimeout(pr,t-ur.now()-ar)),er&&(er=clearInterval(er))):(er||(ir=ur.now(),er=setInterval(vr,rr)),tr=1,cr(pr)))}function yr(t,n,e){var r=new lr;return n=null==n?0:+n,r.restart(function(e){r.stop(),t(e+n)},n,e),r}lr.prototype=hr.prototype={constructor:lr,restart:function(t,n,e){if("function"!=typeof t)throw new TypeError("callback is not a function");e=(null==e?fr():+e)+(null==n?0:+n),this._next||Je===this||(Je?Je._next=this:Ke=this,Je=this),this._call=t,this._time=e,gr()},stop:function(){this._call&&(this._call=null,this._time=1/0,gr())}};var _r=I("start","end","cancel","interrupt"),br=[],mr=0,xr=1,wr=2,Mr=3,Nr=4,Tr=5,Ar=6;function Sr(t,n,e,r,i,o){var a=t.__transition;if(a){if(e in a)return}else t.__transition={};!function(t,n,e){var r,i=t.__transition;function o(c){var f,s,l,h;if(e.state!==xr)return u();for(f in i)if((h=i[f]).name===e.name){if(h.state===Mr)return yr(o);h.state===Nr?(h.state=Ar,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete i[f]):+fmr)throw new Error("too late; already scheduled");return e}function Er(t,n){var e=Cr(t,n);if(e.state>Mr)throw new Error("too late; already running");return e}function Cr(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}function Pr(t,n){var e,r,i,o=t.__transition,a=!0;if(o){for(i in n=null==n?null:n+"",o)(e=o[i]).name===n?(r=e.state>wr&&e.state=0&&(t=t.slice(0,n)),!t||"start"===t})}(n)?kr:Er;return function(){var a=o(this,t),u=a.on;u!==r&&(i=(r=u).copy()).on(n,e),a.on=i}}(e,t,n))},attr:function(t,n){var e=W(t),r="transform"===e?Le:Rr;return this.attrTween(t,"function"==typeof n?(e.local?function(t,n,e){var r,i,o;return function(){var 
a,u,c=e(this);if(null!=c)return(a=this.getAttributeNS(t.space,t.local))===(u=c+"")?null:a===r&&u===i?o:(i=u,o=n(r=a,c));this.removeAttributeNS(t.space,t.local)}}:function(t,n,e){var r,i,o;return function(){var a,u,c=e(this);if(null!=c)return(a=this.getAttribute(t))===(u=c+"")?null:a===r&&u===i?o:(i=u,o=n(r=a,c));this.removeAttribute(t)}})(e,r,zr(this,"attr."+t,n)):null==n?(e.local?function(t){return function(){this.removeAttributeNS(t.space,t.local)}}:function(t){return function(){this.removeAttribute(t)}})(e):(e.local?function(t,n,e){var r,i,o=e+"";return function(){var a=this.getAttributeNS(t.space,t.local);return a===o?null:a===r?i:i=n(r=a,e)}}:function(t,n,e){var r,i,o=e+"";return function(){var a=this.getAttribute(t);return a===o?null:a===r?i:i=n(r=a,e)}})(e,r,n))},attrTween:function(t,n){var e="attr."+t;if(arguments.length<2)return(e=this.tween(e))&&e._value;if(null==n)return this.tween(e,null);if("function"!=typeof n)throw new Error;var r=W(t);return this.tween(e,(r.local?function(t,n){var e,r;function i(){var i=n.apply(this,arguments);return i!==r&&(e=(r=i)&&function(t,n){return function(e){this.setAttributeNS(t.space,t.local,n.call(this,e))}}(t,i)),e}return i._value=n,i}:function(t,n){var e,r;function i(){var i=n.apply(this,arguments);return i!==r&&(e=(r=i)&&function(t,n){return function(e){this.setAttribute(t,n.call(this,e))}}(t,i)),e}return i._value=n,i})(r,n))},style:function(t,n,e){var r="transform"==(t+="")?qe:Rr;return null==n?this.styleTween(t,function(t,n){var e,r,i;return function(){var o=ft(this,t),a=(this.style.removeProperty(t),ft(this,t));return o===a?null:o===e&&a===r?i:i=n(e=o,r=a)}}(t,r)).on("end.style."+t,qr(t)):"function"==typeof n?this.styleTween(t,function(t,n,e){var r,i,o;return function(){var a=ft(this,t),u=e(this),c=u+"";return null==u&&(this.style.removeProperty(t),c=u=ft(this,t)),a===c?null:a===r&&c===i?o:(i=c,o=n(r=a,u))}}(t,r,zr(this,"style."+t,n))).each(function(t,n){var e,r,i,o,a="style."+n,u="end."+a;return function(){var 
c=Er(this,t),f=c.on,s=null==c.value[a]?o||(o=qr(n)):void 0;f===e&&i===s||(r=(e=f).copy()).on(u,i=s),c.on=r}}(this._id,t)):this.styleTween(t,function(t,n,e){var r,i,o=e+"";return function(){var a=ft(this,t);return a===o?null:a===r?i:i=n(r=a,e)}}(t,r,n),e).on("end.style."+t,null)},styleTween:function(t,n,e){var r="style."+(t+="");if(arguments.length<2)return(r=this.tween(r))&&r._value;if(null==n)return this.tween(r,null);if("function"!=typeof n)throw new Error;return this.tween(r,function(t,n,e){var r,i;function o(){var o=n.apply(this,arguments);return o!==i&&(r=(i=o)&&function(t,n,e){return function(r){this.style.setProperty(t,n.call(this,r),e)}}(t,o,e)),r}return o._value=n,o}(t,n,null==e?"":e))},text:function(t){return this.tween("text","function"==typeof t?function(t){return function(){var n=t(this);this.textContent=null==n?"":n}}(zr(this,"text",t)):function(t){return function(){this.textContent=t}}(null==t?"":t+""))},textTween:function(t){var n="text";if(arguments.length<1)return(n=this.tween(n))&&n._value;if(null==t)return this.tween(n,null);if("function"!=typeof t)throw new Error;return this.tween(n,function(t){var n,e;function r(){var r=t.apply(this,arguments);return r!==e&&(n=(e=r)&&function(t){return function(n){this.textContent=t.call(this,n)}}(r)),n}return r._value=t,r}(t))},remove:function(){return this.on("end.remove",function(t){return function(){var n=this.parentNode;for(var e in this.__transition)if(+e!==t)return;n&&n.removeChild(this)}}(this._id))},tween:function(t,n){var e=this._id;if(t+="",arguments.length<2){for(var r,i=Cr(this.node(),e).tween,o=0,a=i.length;o0&&(r=o-P),M<0?d=p-z:M>0&&(u=c-z),x=Mi,B.attr("cursor",Pi.selection),I());break;default:return}xi()},!0).on("keyup.brush",function(){switch(t.event.keyCode){case 16:R&&(g=y=R=!1,I());break;case 18:x===Ti&&(w<0?f=h:w>0&&(r=o),M<0?d=p:M>0&&(u=c),x=Ni,I());break;case 
32:x===Mi&&(t.event.altKey?(w&&(f=h-P*w,r=o+P*w),M&&(d=p-z*M,u=c+z*M),x=Ti):(w<0?f=h:w>0&&(r=o),M<0?d=p:M>0&&(u=c),x=Ni),B.attr("cursor",Pi[m]),I());break;default:return}xi()},!0),Ht(t.event.view)}mi(),Pr(b),s.call(b),U.start()}function Y(){var t=D(b);!R||g||y||(Math.abs(t[0]-L[0])>Math.abs(t[1]-L[1])?y=!0:g=!0),L=t,v=!0,xi(),I()}function I(){var t;switch(P=L[0]-q[0],z=L[1]-q[1],x){case Mi:case wi:w&&(P=Math.max(S-r,Math.min(E-f,P)),o=r+P,h=f+P),M&&(z=Math.max(k-u,Math.min(C-d,z)),c=u+z,p=d+z);break;case Ni:w<0?(P=Math.max(S-r,Math.min(E-r,P)),o=r+P,h=f):w>0&&(P=Math.max(S-f,Math.min(E-f,P)),o=r,h=f+P),M<0?(z=Math.max(k-u,Math.min(C-u,z)),c=u+z,p=d):M>0&&(z=Math.max(k-d,Math.min(C-d,z)),c=u,p=d+z);break;case Ti:w&&(o=Math.max(S,Math.min(E,r-P*w)),h=Math.max(S,Math.min(E,f+P*w))),M&&(c=Math.max(k,Math.min(C,u-z*M)),p=Math.max(k,Math.min(C,d+z*M)))}h1e-6)if(Math.abs(s*u-c*f)>1e-6&&i){var h=e-o,d=r-a,p=u*u+c*c,v=h*h+d*d,g=Math.sqrt(p),y=Math.sqrt(l),_=i*Math.tan((Qi-Math.acos((p+l-v)/(2*g*y)))/2),b=_/y,m=_/g;Math.abs(b-1)>1e-6&&(this._+="L"+(t+b*f)+","+(n+b*s)),this._+="A"+i+","+i+",0,0,"+ +(s*h>f*d)+","+(this._x1=t+m*u)+","+(this._y1=n+m*c)}else this._+="L"+(this._x1=t)+","+(this._y1=n);else;},arc:function(t,n,e,r,i,o){t=+t,n=+n,o=!!o;var a=(e=+e)*Math.cos(r),u=e*Math.sin(r),c=t+a,f=n+u,s=1^o,l=o?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);null===this._x1?this._+="M"+c+","+f:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-f)>1e-6)&&(this._+="L"+c+","+f),e&&(l<0&&(l=l%Ki+Ki),l>Ji?this._+="A"+e+","+e+",0,1,"+s+","+(t-a)+","+(n-u)+"A"+e+","+e+",0,1,"+s+","+(this._x1=c)+","+(this._y1=f):l>1e-6&&(this._+="A"+e+","+e+",0,"+ +(l>=Qi)+","+s+","+(this._x1=t+e*Math.cos(i))+","+(this._y1=n+e*Math.sin(i))))},rect:function(t,n,e,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+n)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};function uo(){}function co(t,n){var e=new uo;if(t instanceof uo)t.each(function(t,n){e.set(n,t)});else 
if(Array.isArray(t)){var r,i=-1,o=t.length;if(null==n)for(;++ir!=d>r&&e<(h-f)*(r-s)/(d-s)+f&&(i=-i)}return i}function wo(t,n,e){var r,i,o,a;return function(t,n,e){return(n[0]-t[0])*(e[1]-t[1])==(e[0]-t[0])*(n[1]-t[1])}(t,n,e)&&(i=t[r=+(t[0]===n[0])],o=e[r],a=n[r],i<=o&&o<=a||a<=o&&o<=i)}function Mo(){}var No=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]];function To(){var t=1,n=1,e=M,r=u;function i(t){var n=e(t);if(Array.isArray(n))n=n.slice().sort(_o);else{var r=s(t),i=r[0],a=r[1];n=w(i,a,n),n=g(Math.floor(i/n)*n,Math.floor(a/n)*n,n)}return n.map(function(n){return o(t,n)})}function o(e,i){var o=[],u=[];return function(e,r,i){var o,u,c,f,s,l,h=new Array,d=new Array;o=u=-1,f=e[0]>=r,No[f<<1].forEach(p);for(;++o=r,No[c|f<<1].forEach(p);No[f<<0].forEach(p);for(;++u=r,s=e[u*t]>=r,No[f<<1|s<<2].forEach(p);++o=r,l=s,s=e[u*t+o+1]>=r,No[c|f<<1|s<<2|l<<3].forEach(p);No[f|s<<3].forEach(p)}o=-1,s=e[u*t]>=r,No[s<<2].forEach(p);for(;++o=r,No[s<<2|l<<3].forEach(p);function p(t){var n,e,r=[t[0][0]+o,t[0][1]+u],c=[t[1][0]+o,t[1][1]+u],f=a(r),s=a(c);(n=d[f])?(e=h[s])?(delete d[n.end],delete h[e.start],n===e?(n.ring.push(c),i(n.ring)):h[n.start]=d[e.end]={start:n.start,end:e.end,ring:n.ring.concat(e.ring)}):(delete d[n.end],n.ring.push(c),d[n.end=s]=n):(n=h[s])?(e=d[f])?(delete h[n.start],delete d[e.end],n===e?(n.ring.push(c),i(n.ring)):h[e.start]=d[n.end]={start:e.start,end:n.end,ring:e.ring.concat(n.ring)}):(delete h[n.start],n.ring.unshift(r),h[n.start=f]=n):h[f]=d[s]={start:f,end:s,ring:[r,c]}}No[s<<3].forEach(p)}(e,i,function(t){r(t,e,i),function(t){for(var n=0,e=t.length,r=t[e-1][1]*t[0][0]-t[e-1][0]*t[0][1];++n0?o.push([t]):u.push(t)}),u.forEach(function(t){for(var n,e=0,r=o.length;e0&&a0&&u0&&o>0))throw new 
Error("invalid size");return t=r,n=o,i},i.thresholds=function(t){return arguments.length?(e="function"==typeof t?t:Array.isArray(t)?bo(yo.call(t)):bo(t),i):e},i.smooth=function(t){return arguments.length?(r=t?u:Mo,i):r===u},i}function Ao(t,n,e){for(var r=t.width,i=t.height,o=1+(e<<1),a=0;a=e&&(u>=o&&(c-=t.data[u-o+a*r]),n.data[u-e+a*r]=c/Math.min(u+1,r-1+o-u,o))}function So(t,n,e){for(var r=t.width,i=t.height,o=1+(e<<1),a=0;a=e&&(u>=o&&(c-=t.data[a+(u-o)*r]),n.data[a+(u-e)*r]=c/Math.min(u+1,i-1+o-u,o))}function ko(t){return t[0]}function Eo(t){return t[1]}function Co(){return 1}var Po={},zo={},Ro=34,Do=10,qo=13;function Lo(t){return new Function("d","return {"+t.map(function(t,n){return JSON.stringify(t)+": d["+n+'] || ""'}).join(",")+"}")}function Uo(t){var n=Object.create(null),e=[];return t.forEach(function(t){for(var r in t)r in n||e.push(n[r]=r)}),e}function Oo(t,n){var e=t+"",r=e.length;return r9999?"+"+Oo(t,6):Oo(t,4)}(t.getUTCFullYear())+"-"+Oo(t.getUTCMonth()+1,2)+"-"+Oo(t.getUTCDate(),2)+(i?"T"+Oo(n,2)+":"+Oo(e,2)+":"+Oo(r,2)+"."+Oo(i,3)+"Z":r?"T"+Oo(n,2)+":"+Oo(e,2)+":"+Oo(r,2)+"Z":e||n?"T"+Oo(n,2)+":"+Oo(e,2)+"Z":"")}function Fo(t){var n=new RegExp('["'+t+"\n\r]"),e=t.charCodeAt(0);function r(t,n){var r,i=[],o=t.length,a=0,u=0,c=o<=0,f=!1;function s(){if(c)return zo;if(f)return f=!1,Po;var n,r,i=a;if(t.charCodeAt(i)===Ro){for(;a++=o?c=!0:(r=t.charCodeAt(a++))===Do?f=!0:r===qo&&(f=!0,t.charCodeAt(a)===Do&&++a),t.slice(i+1,n-1).replace(/""/g,'"')}for(;a=(o=(v+y)/2))?v=o:y=o,(s=e>=(a=(g+_)/2))?g=a:_=a,i=d,!(d=d[l=s<<1|f]))return i[l]=p,t;if(u=+t._x.call(null,d.data),c=+t._y.call(null,d.data),n===u&&e===c)return p.next=d,i?i[l]=p:t._root=p,t;do{i=i?i[l]=new Array(4):t._root=new Array(4),(f=n>=(o=(v+y)/2))?v=o:y=o,(s=e>=(a=(g+_)/2))?g=a:_=a}while((l=s<<1|f)==(h=(c>=a)<<1|u>=o));return i[h]=d,i[l]=p,t}function ba(t,n,e,r,i){this.node=t,this.x0=n,this.y0=e,this.x1=r,this.y1=i}function ma(t){return t[0]}function xa(t){return t[1]}function wa(t,n,e){var r=new 
Ma(null==n?ma:n,null==e?xa:e,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function Ma(t,n,e,r,i,o){this._x=t,this._y=n,this._x0=e,this._y0=r,this._x1=i,this._y1=o,this._root=void 0}function Na(t){for(var n={data:t.data},e=n;t=t.next;)e=e.next={data:t.data};return n}var Ta=wa.prototype=Ma.prototype;function Aa(t){return t.x+t.vx}function Sa(t){return t.y+t.vy}function ka(t){return t.index}function Ea(t,n){var e=t.get(n);if(!e)throw new Error("missing: "+n);return e}function Ca(t){return t.x}function Pa(t){return t.y}Ta.copy=function(){var t,n,e=new Ma(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return e;if(!r.length)return e._root=Na(r),e;for(t=[{source:r,target:e._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(n=r.source[i])&&(n.length?t.push({source:n,target:r.target[i]=new Array(4)}):r.target[i]=Na(n));return e},Ta.add=function(t){var n=+this._x.call(null,t),e=+this._y.call(null,t);return _a(this.cover(n,e),n,e,t)},Ta.addAll=function(t){var n,e,r,i,o=t.length,a=new Array(o),u=new Array(o),c=1/0,f=1/0,s=-1/0,l=-1/0;for(e=0;es&&(s=r),il&&(l=i));if(c>s||f>l)return this;for(this.cover(c,f).cover(s,l),e=0;et||t>=i||r>n||n>=o;)switch(u=(nh||(o=c.y0)>d||(a=c.x1)=y)<<1|t>=g)&&(c=p[p.length-1],p[p.length-1]=p[p.length-1-f],p[p.length-1-f]=c)}else{var _=t-+this._x.call(null,v.data),b=n-+this._y.call(null,v.data),m=_*_+b*b;if(m=(u=(p+g)/2))?p=u:g=u,(s=a>=(c=(v+y)/2))?v=c:y=c,n=d,!(d=d[l=s<<1|f]))return this;if(!d.length)break;(n[l+1&3]||n[l+2&3]||n[l+3&3])&&(e=n,h=l)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):n?(i?n[l]=i:delete n[l],(d=n[0]||n[1]||n[2]||n[3])&&d===(n[3]||n[2]||n[1]||n[0])&&!d.length&&(e?e[h]=d:this._root=d),this):(this._root=i,this)},Ta.removeAll=function(t){for(var n=0,e=t.length;n1?r[0]+r.slice(2):r,+t.slice(e+1)]}function qa(t){return(t=Da(Math.abs(t)))?t[1]:NaN}var La,Ua=/^(?:(.)?([<>=^]))?([+\-( 
])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Oa(t){if(!(n=Ua.exec(t)))throw new Error("invalid format: "+t);var n;return new Ba({fill:n[1],align:n[2],sign:n[3],symbol:n[4],zero:n[5],width:n[6],comma:n[7],precision:n[8]&&n[8].slice(1),trim:n[9],type:n[10]})}function Ba(t){this.fill=void 0===t.fill?" ":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function Fa(t,n){var e=Da(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}Oa.prototype=Ba.prototype,Ba.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var Ya={"%":function(t,n){return(100*t).toFixed(n)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,n){return t.toExponential(n)},f:function(t,n){return t.toFixed(n)},g:function(t,n){return t.toPrecision(n)},o:function(t){return Math.round(t).toString(8)},p:function(t,n){return Fa(100*t,n)},r:Fa,s:function(t,n){var e=Da(t,n);if(!e)return t+"";var r=e[0],i=e[1],o=i-(La=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,a=r.length;return o===a?r:o>a?r+new Array(o-a+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+Da(t,Math.max(0,n+o-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}};function Ia(t){return t}var 
Ha,ja=Array.prototype.map,Xa=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function Va(t){var n,e,r=void 0===t.grouping||void 0===t.thousands?Ia:(n=ja.call(t.grouping,Number),e=t.thousands+"",function(t,r){for(var i=t.length,o=[],a=0,u=n[0],c=0;i>0&&u>0&&(c+u+1>r&&(u=Math.max(1,r-c)),o.push(t.substring(i-=u,i+u)),!((c+=u+1)>r));)u=n[a=(a+1)%n.length];return o.reverse().join(e)}),i=void 0===t.currency?"":t.currency[0]+"",o=void 0===t.currency?"":t.currency[1]+"",a=void 0===t.decimal?".":t.decimal+"",u=void 0===t.numerals?Ia:function(t){return function(n){return n.replace(/[0-9]/g,function(n){return t[+n]})}}(ja.call(t.numerals,String)),c=void 0===t.percent?"%":t.percent+"",f=void 0===t.minus?"-":t.minus+"",s=void 0===t.nan?"NaN":t.nan+"";function l(t){var n=(t=Oa(t)).fill,e=t.align,l=t.sign,h=t.symbol,d=t.zero,p=t.width,v=t.comma,g=t.precision,y=t.trim,_=t.type;"n"===_?(v=!0,_="g"):Ya[_]||(void 0===g&&(g=12),y=!0,_="g"),(d||"0"===n&&"="===e)&&(d=!0,n="0",e="=");var b="$"===h?i:"#"===h&&/[boxX]/.test(_)?"0"+_.toLowerCase():"",m="$"===h?o:/[%p]/.test(_)?c:"",x=Ya[_],w=/[defgprs%]/.test(_);function M(t){var i,o,c,h=b,M=m;if("c"===_)M=x(t)+M,t="";else{var N=(t=+t)<0||1/t<0;if(t=isNaN(t)?s:x(Math.abs(t),g),y&&(t=function(t){t:for(var n,e=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(n+1):t}(t)),N&&0==+t&&"+"!==l&&(N=!1),h=(N?"("===l?l:f:"-"===l||"("===l?"":l)+h,M=("s"===_?Xa[8+La/3]:"")+M+(N&&"("===l?")":""),w)for(i=-1,o=t.length;++i(c=t.charCodeAt(i))||c>57){M=(46===c?a+t.slice(i+1):t.slice(i))+M,t=t.slice(0,i);break}}v&&!d&&(t=r(t,1/0));var T=h.length+t.length+M.length,A=T>1)+h+t+M+A.slice(T);break;default:t=A+h+t+M}return u(t)}return g=void 0===g?6:/[gprs]/.test(_)?Math.max(1,Math.min(21,g)):Math.max(0,Math.min(20,g)),M.toString=function(){return t+""},M}return{format:l,formatPrefix:function(t,n){var e=l(((t=Oa(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(qa(n)/3))),i=Math.pow(10,-r),o=Xa[8+r/3];return function(t){return 
e(i*t)+o}}}}function Ga(n){return Ha=Va(n),t.format=Ha.format,t.formatPrefix=Ha.formatPrefix,Ha}function $a(t){return Math.max(0,-qa(Math.abs(t)))}function Wa(t,n){return Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(qa(n)/3)))-qa(Math.abs(t)))}function Za(t,n){return t=Math.abs(t),n=Math.abs(n)-t,Math.max(0,qa(n)-qa(t))+1}function Qa(){return new Ka}function Ka(){this.reset()}Ga({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"}),Ka.prototype={constructor:Ka,reset:function(){this.s=this.t=0},add:function(t){tu(Ja,t,this.t),tu(this,Ja.s,this.s),this.s?this.t+=Ja.t:this.s=Ja.t},valueOf:function(){return this.s}};var Ja=new Ka;function tu(t,n,e){var r=t.s=n+e,i=r-n,o=r-i;t.t=n-o+(e-i)}var nu=1e-6,eu=1e-12,ru=Math.PI,iu=ru/2,ou=ru/4,au=2*ru,uu=180/ru,cu=ru/180,fu=Math.abs,su=Math.atan,lu=Math.atan2,hu=Math.cos,du=Math.ceil,pu=Math.exp,vu=Math.log,gu=Math.pow,yu=Math.sin,_u=Math.sign||function(t){return t>0?1:t<0?-1:0},bu=Math.sqrt,mu=Math.tan;function xu(t){return t>1?0:t<-1?ru:Math.acos(t)}function wu(t){return t>1?iu:t<-1?-iu:Math.asin(t)}function Mu(t){return(t=yu(t/2))*t}function Nu(){}function Tu(t,n){t&&Su.hasOwnProperty(t.type)&&Su[t.type](t,n)}var Au={Feature:function(t,n){Tu(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;++r=0?1:-1,i=r*e,o=hu(n=(n*=cu)/2+ou),a=yu(n),u=qu*a,c=Du*o+u*hu(i),f=u*r*yu(i);Lu.add(lu(f,c)),Ru=t,Du=o,qu=a}function Hu(t){return[lu(t[1],t[0]),wu(t[2])]}function ju(t){var n=t[0],e=t[1],r=hu(e);return[r*hu(n),r*yu(n),yu(e)]}function Xu(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]}function Vu(t,n){return[t[1]*n[2]-t[2]*n[1],t[2]*n[0]-t[0]*n[2],t[0]*n[1]-t[1]*n[0]]}function Gu(t,n){t[0]+=n[0],t[1]+=n[1],t[2]+=n[2]}function $u(t,n){return[t[0]*n,t[1]*n,t[2]*n]}function Wu(t){var n=bu(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=n,t[1]/=n,t[2]/=n}var 
Zu,Qu,Ku,Ju,tc,nc,ec,rc,ic,oc,ac,uc,cc,fc,sc,lc,hc,dc,pc,vc,gc,yc,_c,bc,mc,xc,wc=Qa(),Mc={point:Nc,lineStart:Ac,lineEnd:Sc,polygonStart:function(){Mc.point=kc,Mc.lineStart=Ec,Mc.lineEnd=Cc,wc.reset(),Ou.polygonStart()},polygonEnd:function(){Ou.polygonEnd(),Mc.point=Nc,Mc.lineStart=Ac,Mc.lineEnd=Sc,Lu<0?(Zu=-(Ku=180),Qu=-(Ju=90)):wc>nu?Ju=90:wc<-nu&&(Qu=-90),oc[0]=Zu,oc[1]=Ku},sphere:function(){Zu=-(Ku=180),Qu=-(Ju=90)}};function Nc(t,n){ic.push(oc=[Zu=t,Ku=t]),nJu&&(Ju=n)}function Tc(t,n){var e=ju([t*cu,n*cu]);if(rc){var r=Vu(rc,e),i=Vu([r[1],-r[0],0],r);Wu(i),i=Hu(i);var o,a=t-tc,u=a>0?1:-1,c=i[0]*uu*u,f=fu(a)>180;f^(u*tcJu&&(Ju=o):f^(u*tc<(c=(c+360)%360-180)&&cJu&&(Ju=n)),f?tPc(Zu,Ku)&&(Ku=t):Pc(t,Ku)>Pc(Zu,Ku)&&(Zu=t):Ku>=Zu?(tKu&&(Ku=t)):t>tc?Pc(Zu,t)>Pc(Zu,Ku)&&(Ku=t):Pc(t,Ku)>Pc(Zu,Ku)&&(Zu=t)}else ic.push(oc=[Zu=t,Ku=t]);nJu&&(Ju=n),rc=e,tc=t}function Ac(){Mc.point=Tc}function Sc(){oc[0]=Zu,oc[1]=Ku,Mc.point=Nc,rc=null}function kc(t,n){if(rc){var e=t-tc;wc.add(fu(e)>180?e+(e>0?360:-360):e)}else nc=t,ec=n;Ou.point(t,n),Tc(t,n)}function Ec(){Ou.lineStart()}function Cc(){kc(nc,ec),Ou.lineEnd(),fu(wc)>nu&&(Zu=-(Ku=180)),oc[0]=Zu,oc[1]=Ku,rc=null}function Pc(t,n){return(n-=t)<0?n+360:n}function zc(t,n){return t[0]-n[0]}function Rc(t,n){return t[0]<=t[1]?t[0]<=n&&n<=t[1]:nru?t+Math.round(-t/au)*au:t,n]}function $c(t,n,e){return(t%=au)?n||e?Vc(Zc(t),Qc(n,e)):Zc(t):n||e?Qc(n,e):Gc}function Wc(t){return function(n,e){return[(n+=t)>ru?n-au:n<-ru?n+au:n,e]}}function Zc(t){var n=Wc(t);return n.invert=Wc(-t),n}function Qc(t,n){var e=hu(t),r=yu(t),i=hu(n),o=yu(n);function a(t,n){var a=hu(n),u=hu(t)*a,c=yu(t)*a,f=yu(n),s=f*e+u*r;return[lu(c*i-s*o,u*e-f*r),wu(s*i+c*o)]}return a.invert=function(t,n){var a=hu(n),u=hu(t)*a,c=yu(t)*a,f=yu(n),s=f*i-c*o;return[lu(c*i+f*o,u*e+s*r),wu(s*e-u*r)]},a}function Kc(t){function n(n){return(n=t(n[0]*cu,n[1]*cu))[0]*=uu,n[1]*=uu,n}return 
t=$c(t[0]*cu,t[1]*cu,t.length>2?t[2]*cu:0),n.invert=function(n){return(n=t.invert(n[0]*cu,n[1]*cu))[0]*=uu,n[1]*=uu,n},n}function Jc(t,n,e,r,i,o){if(e){var a=hu(n),u=yu(n),c=r*e;null==i?(i=n+r*au,o=n-c/2):(i=tf(a,i),o=tf(a,o),(r>0?io)&&(i+=r*au));for(var f,s=i;r>0?s>o:s1&&n.push(n.pop().concat(n.shift()))},result:function(){var e=n;return n=[],t=null,e}}}function ef(t,n){return fu(t[0]-n[0])=0;--o)i.point((s=f[o])[0],s[1]);else r(h.x,h.p.x,-1,i);h=h.p}f=(h=h.o).z,d=!d}while(!h.v);i.lineEnd()}}}function af(t){if(n=t.length){for(var n,e,r=0,i=t[0];++r=0?1:-1,T=N*M,A=T>ru,S=v*x;if(uf.add(lu(S*N*yu(T),g*w+S*hu(T))),a+=A?M+N*au:M,A^d>=e^b>=e){var k=Vu(ju(h),ju(_));Wu(k);var E=Vu(o,k);Wu(E);var C=(A^M>=0?-1:1)*wu(E[2]);(r>C||r===C&&(k[0]||k[1]))&&(u+=A^M>=0?1:-1)}}return(a<-nu||a0){for(l||(i.polygonStart(),l=!0),i.lineStart(),t=0;t1&&2&c&&h.push(h.pop().concat(h.shift())),a.push(h.filter(lf))}return h}}function lf(t){return t.length>1}function hf(t,n){return((t=t.x)[0]<0?t[1]-iu-nu:iu-t[1])-((n=n.x)[0]<0?n[1]-iu-nu:iu-n[1])}var df=sf(function(){return!0},function(t){var n,e=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),n=1},point:function(o,a){var u=o>0?ru:-ru,c=fu(o-e);fu(c-ru)0?iu:-iu),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(u,r),t.point(o,r),n=0):i!==u&&c>=ru&&(fu(e-i)nu?su((yu(n)*(o=hu(r))*yu(e)-yu(r)*(i=hu(n))*yu(t))/(i*o*a)):(n+r)/2}(e,r,o,a),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(u,r),n=0),t.point(e=o,r=a),i=u},lineEnd:function(){t.lineEnd(),e=r=NaN},clean:function(){return 2-n}}},function(t,n,e,r){var i;if(null==t)i=e*iu,r.point(-ru,i),r.point(0,i),r.point(ru,i),r.point(ru,0),r.point(ru,-i),r.point(0,-i),r.point(-ru,-i),r.point(-ru,0),r.point(-ru,i);else if(fu(t[0]-n[0])>nu){var o=t[0]0,i=fu(n)>nu;function o(t,e){return hu(t)*hu(e)>n}function a(t,e,r){var i=[1,0,0],o=Vu(ju(t),ju(e)),a=Xu(o,o),u=o[0],c=a-u*u;if(!c)return!r&&t;var f=n*a/c,s=-n*u/c,l=Vu(i,o),h=$u(i,f);Gu(h,$u(o,s));var 
d=l,p=Xu(h,d),v=Xu(d,d),g=p*p-v*(Xu(h,h)-1);if(!(g<0)){var y=bu(g),_=$u(d,(-p-y)/v);if(Gu(_,h),_=Hu(_),!r)return _;var b,m=t[0],x=e[0],w=t[1],M=e[1];x0^_[1]<(fu(_[0]-m)ru^(m<=_[0]&&_[0]<=x)){var A=$u(d,(-p+y)/v);return Gu(A,h),[_,Hu(A)]}}}function u(n,e){var i=r?t:ru-t,o=0;return n<-i?o|=1:n>i&&(o|=2),e<-i?o|=4:e>i&&(o|=8),o}return sf(o,function(t){var n,e,c,f,s;return{lineStart:function(){f=c=!1,s=1},point:function(l,h){var d,p=[l,h],v=o(l,h),g=r?v?0:u(l,h):v?u(l+(l<0?ru:-ru),h):0;if(!n&&(f=c=v)&&t.lineStart(),v!==c&&(!(d=a(n,p))||ef(n,d)||ef(p,d))&&(p[0]+=nu,p[1]+=nu,v=o(p[0],p[1])),v!==c)s=0,v?(t.lineStart(),d=a(p,n),t.point(d[0],d[1])):(d=a(n,p),t.point(d[0],d[1]),t.lineEnd()),n=d;else if(i&&n&&r^v){var y;g&e||!(y=a(p,n,!0))||(s=0,r?(t.lineStart(),t.point(y[0][0],y[0][1]),t.point(y[1][0],y[1][1]),t.lineEnd()):(t.point(y[1][0],y[1][1]),t.lineEnd(),t.lineStart(),t.point(y[0][0],y[0][1])))}!v||n&&ef(n,p)||t.point(p[0],p[1]),n=p,c=v,e=g},lineEnd:function(){c&&t.lineEnd(),n=null},clean:function(){return s|(f&&c)<<1}}},function(n,r,i,o){Jc(o,t,e,i,n,r)},r?[0,-t]:[-ru,t-ru])}var vf=1e9,gf=-vf;function yf(t,n,e,r){function i(i,o){return t<=i&&i<=e&&n<=o&&o<=r}function o(i,o,u,f){var s=0,l=0;if(null==i||(s=a(i,u))!==(l=a(o,u))||c(i,o)<0^u>0)do{f.point(0===s||3===s?t:e,s>1?r:n)}while((s=(s+u+4)%4)!==l);else f.point(o[0],o[1])}function a(r,i){return fu(r[0]-t)0?0:3:fu(r[0]-e)0?2:1:fu(r[1]-n)0?1:0:i>0?3:2}function u(t,n){return c(t.x,n.x)}function c(t,n){var e=a(t,1),r=a(n,1);return e!==r?e-r:0===e?n[1]-t[1]:1===e?t[0]-n[0]:2===e?t[1]-n[1]:n[0]-t[0]}return function(a){var c,f,s,l,h,d,p,v,g,y,_,b=a,m=nf(),x={point:w,lineStart:function(){x.point=M,f&&f.push(s=[]);y=!0,g=!1,p=v=NaN},lineEnd:function(){c&&(M(l,h),d&&g&&m.rejoin(),c.push(m.result()));x.point=w,g&&b.lineEnd()},polygonStart:function(){b=m,c=[],f=[],_=!0},polygonEnd:function(){var n=function(){for(var n=0,e=0,i=f.length;er&&(h-o)*(r-a)>(d-a)*(t-o)&&++n:d<=r&&(h-o)*(r-a)<(d-a)*(t-o)&&--n;return 
n}(),e=_&&n,i=(c=A(c)).length;(e||i)&&(a.polygonStart(),e&&(a.lineStart(),o(null,null,1,a),a.lineEnd()),i&&of(c,u,n,o,a),a.polygonEnd());b=a,c=f=s=null}};function w(t,n){i(t,n)&&b.point(t,n)}function M(o,a){var u=i(o,a);if(f&&s.push([o,a]),y)l=o,h=a,d=u,y=!1,u&&(b.lineStart(),b.point(o,a));else if(u&&g)b.point(o,a);else{var c=[p=Math.max(gf,Math.min(vf,p)),v=Math.max(gf,Math.min(vf,v))],m=[o=Math.max(gf,Math.min(vf,o)),a=Math.max(gf,Math.min(vf,a))];!function(t,n,e,r,i,o){var a,u=t[0],c=t[1],f=0,s=1,l=n[0]-u,h=n[1]-c;if(a=e-u,l||!(a>0)){if(a/=l,l<0){if(a0){if(a>s)return;a>f&&(f=a)}if(a=i-u,l||!(a<0)){if(a/=l,l<0){if(a>s)return;a>f&&(f=a)}else if(l>0){if(a0)){if(a/=h,h<0){if(a0){if(a>s)return;a>f&&(f=a)}if(a=o-c,h||!(a<0)){if(a/=h,h<0){if(a>s)return;a>f&&(f=a)}else if(h>0){if(a0&&(t[0]=u+f*l,t[1]=c+f*h),s<1&&(n[0]=u+s*l,n[1]=c+s*h),!0}}}}}(c,m,t,n,e,r)?u&&(b.lineStart(),b.point(o,a),_=!1):(g||(b.lineStart(),b.point(c[0],c[1])),b.point(m[0],m[1]),u||b.lineEnd(),_=!1)}p=o,v=a,g=u}return x}}var _f,bf,mf,xf=Qa(),wf={sphere:Nu,point:Nu,lineStart:function(){wf.point=Nf,wf.lineEnd=Mf},lineEnd:Nu,polygonStart:Nu,polygonEnd:Nu};function Mf(){wf.point=wf.lineEnd=Nu}function Nf(t,n){_f=t*=cu,bf=yu(n*=cu),mf=hu(n),wf.point=Tf}function Tf(t,n){t*=cu;var e=yu(n*=cu),r=hu(n),i=fu(t-_f),o=hu(i),a=r*yu(i),u=mf*e-bf*r*o,c=bf*e+mf*r*o;xf.add(lu(bu(a*a+u*u),c)),_f=t,bf=e,mf=r}function Af(t){return xf.reset(),Cu(t,wf),+xf}var Sf=[null,null],kf={type:"LineString",coordinates:Sf};function Ef(t,n){return Sf[0]=t,Sf[1]=n,Af(kf)}var Cf={Feature:function(t,n){return zf(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;++r0&&(i=Ef(t[o],t[o-1]))>0&&e<=i&&r<=i&&(e+r-i)*(1-Math.pow((e-r)/i,2))nu}).map(c)).concat(g(du(o/d)*d,i,d).filter(function(t){return fu(t%v)>nu}).map(f))}return _.lines=function(){return 
b().map(function(t){return{type:"LineString",coordinates:t}})},_.outline=function(){return{type:"Polygon",coordinates:[s(r).concat(l(a).slice(1),s(e).reverse().slice(1),l(u).reverse().slice(1))]}},_.extent=function(t){return arguments.length?_.extentMajor(t).extentMinor(t):_.extentMinor()},_.extentMajor=function(t){return arguments.length?(r=+t[0][0],e=+t[1][0],u=+t[0][1],a=+t[1][1],r>e&&(t=r,r=e,e=t),u>a&&(t=u,u=a,a=t),_.precision(y)):[[r,u],[e,a]]},_.extentMinor=function(e){return arguments.length?(n=+e[0][0],t=+e[1][0],o=+e[0][1],i=+e[1][1],n>t&&(e=n,n=t,t=e),o>i&&(e=o,o=i,i=e),_.precision(y)):[[n,o],[t,i]]},_.step=function(t){return arguments.length?_.stepMajor(t).stepMinor(t):_.stepMinor()},_.stepMajor=function(t){return arguments.length?(p=+t[0],v=+t[1],_):[p,v]},_.stepMinor=function(t){return arguments.length?(h=+t[0],d=+t[1],_):[h,d]},_.precision=function(h){return arguments.length?(y=+h,c=Of(o,i,90),f=Bf(n,t,y),s=Of(u,a,90),l=Bf(r,e,y),_):y},_.extentMajor([[-180,-90+nu],[180,90-nu]]).extentMinor([[-180,-80-nu],[180,80+nu]])}function Yf(t){return t}var If,Hf,jf,Xf,Vf=Qa(),Gf=Qa(),$f={point:Nu,lineStart:Nu,lineEnd:Nu,polygonStart:function(){$f.lineStart=Wf,$f.lineEnd=Kf},polygonEnd:function(){$f.lineStart=$f.lineEnd=$f.point=Nu,Vf.add(fu(Gf)),Gf.reset()},result:function(){var t=Vf/2;return Vf.reset(),t}};function Wf(){$f.point=Zf}function Zf(t,n){$f.point=Qf,If=jf=t,Hf=Xf=n}function Qf(t,n){Gf.add(Xf*t-jf*n),jf=t,Xf=n}function Kf(){Qf(If,Hf)}var Jf=1/0,ts=Jf,ns=-Jf,es=ns,rs={point:function(t,n){tns&&(ns=t);nes&&(es=n)},lineStart:Nu,lineEnd:Nu,polygonStart:Nu,polygonEnd:Nu,result:function(){var t=[[Jf,ts],[ns,es]];return ns=es=-(ts=Jf=1/0),t}};var is,os,as,us,cs=0,fs=0,ss=0,ls=0,hs=0,ds=0,ps=0,vs=0,gs=0,ys={point:_s,lineStart:bs,lineEnd:ws,polygonStart:function(){ys.lineStart=Ms,ys.lineEnd=Ns},polygonEnd:function(){ys.point=_s,ys.lineStart=bs,ys.lineEnd=ws},result:function(){var t=gs?[ps/gs,vs/gs]:ds?[ls/ds,hs/ds]:ss?[cs/ss,fs/ss]:[NaN,NaN];return 
cs=fs=ss=ls=hs=ds=ps=vs=gs=0,t}};function _s(t,n){cs+=t,fs+=n,++ss}function bs(){ys.point=ms}function ms(t,n){ys.point=xs,_s(as=t,us=n)}function xs(t,n){var e=t-as,r=n-us,i=bu(e*e+r*r);ls+=i*(as+t)/2,hs+=i*(us+n)/2,ds+=i,_s(as=t,us=n)}function ws(){ys.point=_s}function Ms(){ys.point=Ts}function Ns(){As(is,os)}function Ts(t,n){ys.point=As,_s(is=as=t,os=us=n)}function As(t,n){var e=t-as,r=n-us,i=bu(e*e+r*r);ls+=i*(as+t)/2,hs+=i*(us+n)/2,ds+=i,ps+=(i=us*t-as*n)*(as+t),vs+=i*(us+n),gs+=3*i,_s(as=t,us=n)}function Ss(t){this._context=t}Ss.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._context.moveTo(t,n),this._point=1;break;case 1:this._context.lineTo(t,n);break;default:this._context.moveTo(t+this._radius,n),this._context.arc(t,n,this._radius,0,au)}},result:Nu};var ks,Es,Cs,Ps,zs,Rs=Qa(),Ds={point:Nu,lineStart:function(){Ds.point=qs},lineEnd:function(){ks&&Ls(Es,Cs),Ds.point=Nu},polygonStart:function(){ks=!0},polygonEnd:function(){ks=null},result:function(){var t=+Rs;return Rs.reset(),t}};function qs(t,n){Ds.point=Ls,Es=Ps=t,Cs=zs=n}function Ls(t,n){Ps-=t,zs-=n,Rs.add(bu(Ps*Ps+zs*zs)),Ps=t,zs=n}function Us(){this._string=[]}function Os(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function Bs(t){return function(n){var e=new Fs;for(var r in t)e[r]=t[r];return e.stream=n,e}}function Fs(){}function Ys(t,n,e){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),Cu(e,t.stream(rs)),n(rs.result()),null!=r&&t.clipExtent(r),t}function Is(t,n,e){return Ys(t,function(e){var 
r=n[1][0]-n[0][0],i=n[1][1]-n[0][1],o=Math.min(r/(e[1][0]-e[0][0]),i/(e[1][1]-e[0][1])),a=+n[0][0]+(r-o*(e[1][0]+e[0][0]))/2,u=+n[0][1]+(i-o*(e[1][1]+e[0][1]))/2;t.scale(150*o).translate([a,u])},e)}function Hs(t,n,e){return Is(t,[[0,0],n],e)}function js(t,n,e){return Ys(t,function(e){var r=+n,i=r/(e[1][0]-e[0][0]),o=(r-i*(e[1][0]+e[0][0]))/2,a=-i*e[0][1];t.scale(150*i).translate([o,a])},e)}function Xs(t,n,e){return Ys(t,function(e){var r=+n,i=r/(e[1][1]-e[0][1]),o=-i*e[0][0],a=(r-i*(e[1][1]+e[0][1]))/2;t.scale(150*i).translate([o,a])},e)}Us.prototype={_radius:4.5,_circle:Os(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,n){switch(this._point){case 0:this._string.push("M",t,",",n),this._point=1;break;case 1:this._string.push("L",t,",",n);break;default:null==this._circle&&(this._circle=Os(this._radius)),this._string.push("M",t,",",n,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}},Fs.prototype={constructor:Fs,point:function(t,n){this.stream.point(t,n)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var Vs=16,Gs=hu(30*cu);function $s(t,n){return+n?function(t,n){function e(r,i,o,a,u,c,f,s,l,h,d,p,v,g){var y=f-r,_=s-i,b=y*y+_*_;if(b>4*n&&v--){var m=a+h,x=u+d,w=c+p,M=bu(m*m+x*x+w*w),N=wu(w/=M),T=fu(fu(w)-1)n||fu((y*E+_*C)/b-.5)>.3||a*h+u*d+c*p2?t[2]%360*cu:0,E()):[g*uu,y*uu,_*uu]},S.angle=function(t){return arguments.length?(b=t%360*cu,E()):b*uu},S.reflectX=function(t){return arguments.length?(m=t?-1:1,E()):m<0},S.reflectY=function(t){return 
arguments.length?(x=t?-1:1,E()):x<0},S.precision=function(t){return arguments.length?(a=$s(u,A=t*t),C()):bu(A)},S.fitExtent=function(t,n){return Is(S,t,n)},S.fitSize=function(t,n){return Hs(S,t,n)},S.fitWidth=function(t,n){return js(S,t,n)},S.fitHeight=function(t,n){return Xs(S,t,n)},function(){return n=t.apply(this,arguments),S.invert=n.invert&&k,E()}}function Js(t){var n=0,e=ru/3,r=Ks(t),i=r(n,e);return i.parallels=function(t){return arguments.length?r(n=t[0]*cu,e=t[1]*cu):[n*uu,e*uu]},i}function tl(t,n){var e=yu(t),r=(e+yu(n))/2;if(fu(r)0?n<-iu+nu&&(n=-iu+nu):n>iu-nu&&(n=iu-nu);var e=i/gu(fl(n),r);return[e*yu(r*t),i-e*hu(r*t)]}return o.invert=function(t,n){var e=i-n,o=_u(r)*bu(t*t+e*e),a=lu(t,fu(e))*_u(e);return e*r<0&&(a-=ru*_u(t)*_u(e)),[a/r,2*su(gu(i/o,1/r))-iu]},o}function ll(t,n){return[t,n]}function hl(t,n){var e=hu(t),r=t===n?yu(t):(e-hu(n))/(n-t),i=e/r+t;if(fu(r)=0;)n+=e[r].value;else n=1;t.value=n}function kl(t,n){var e,r,i,o,a,u=new zl(t),c=+t.value&&(u.value=t.value),f=[u];for(null==n&&(n=El);e=f.pop();)if(c&&(e.value=+e.data.value),(i=n(e.data))&&(a=i.length))for(e.children=new Array(a),o=a-1;o>=0;--o)f.push(r=e.children[o]=new zl(i[o])),r.parent=e,r.depth=e.depth+1;return u.eachBefore(Pl)}function El(t){return t.children}function Cl(t){t.data=t.data.data}function Pl(t){var n=0;do{t.height=n}while((t=t.parent)&&t.height<++n)}function zl(t){this.data=t,this.depth=this.height=0,this.parent=null}_l.invert=function(t,n){for(var e,r=n,i=r*r,o=i*i*i,a=0;a<12&&(o=(i=(r-=e=(r*(dl+pl*i+o*(vl+gl*i))-n)/(dl+3*pl*i+o*(7*vl+9*gl*i)))*r)*i*i,!(fu(e)nu&&--i>0);return[t/(.8707+(o=r*r)*(o*(o*o*o*(.003971-.001529*o)-.013791)-.131979)),r]},xl.invert=il(wu),wl.invert=il(function(t){return 2*su(t)}),Ml.invert=function(t,n){return[-n,2*su(pu(t))-iu]},zl.prototype=kl.prototype={constructor:zl,count:function(){return this.eachAfter(Sl)},each:function(t){var 
n,e,r,i,o=this,a=[o];do{for(n=a.reverse(),a=[];o=n.pop();)if(t(o),e=o.children)for(r=0,i=e.length;r=0;--e)i.push(n[e]);return this},sum:function(t){return this.eachAfter(function(n){for(var e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e})},sort:function(t){return this.eachBefore(function(n){n.children&&n.children.sort(t)})},path:function(t){for(var n=this,e=function(t,n){if(t===n)return t;var e=t.ancestors(),r=n.ancestors(),i=null;for(t=e.pop(),n=r.pop();t===n;)i=t,t=e.pop(),n=r.pop();return i}(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){var t=[];return this.each(function(n){t.push(n)}),t},leaves:function(){var t=[];return this.eachBefore(function(n){n.children||t.push(n)}),t},links:function(){var t=this,n=[];return t.each(function(e){e!==t&&n.push({source:e.parent,target:e})}),n},copy:function(){return kl(this).eachBefore(Cl)}};var Rl=Array.prototype.slice;function Dl(t){for(var n,e,r=0,i=(t=function(t){for(var n,e,r=t.length;r;)e=Math.random()*r--|0,n=t[r],t[r]=t[e],t[e]=n;return t}(Rl.call(t))).length,o=[];r0&&e*e>r*r+i*i}function Ol(t,n){for(var e=0;e(a*=a)?(r=(f+a-i)/(2*f),o=Math.sqrt(Math.max(0,a/f-r*r)),e.x=t.x-r*u-o*c,e.y=t.y-r*c+o*u):(r=(f+i-a)/(2*f),o=Math.sqrt(Math.max(0,i/f-r*r)),e.x=n.x+r*u-o*c,e.y=n.y+r*c+o*u)):(e.x=n.x+e.r,e.y=n.y)}function Hl(t,n){var e=t.r+n.r-1e-6,r=n.x-t.x,i=n.y-t.y;return e>0&&e*e>r*r+i*i}function jl(t){var n=t._,e=t.next._,r=n.r+e.r,i=(n.x*e.r+e.x*n.r)/r,o=(n.y*e.r+e.y*n.r)/r;return i*i+o*o}function Xl(t){this._=t,this.next=null,this.previous=null}function Vl(t){if(!(i=t.length))return 0;var n,e,r,i,o,a,u,c,f,s,l;if((n=t[0]).x=0,n.y=0,!(i>1))return n.r;if(e=t[1],n.x=-e.r,e.x=n.r,e.y=0,!(i>2))return n.r+e.r;Il(e,n,r=t[2]),n=new Xl(n),e=new Xl(e),r=new 
Xl(r),n.next=r.previous=e,e.next=n.previous=r,r.next=e.previous=n;t:for(u=3;uh&&(h=u),g=s*s*v,(d=Math.max(h/g,g/l))>p){s-=u;break}p=d}y.push(a={value:s,dice:c1?n:1)},e}(vh);var _h=function t(n){function e(t,e,r,i,o){if((a=t._squarify)&&a.ratio===n)for(var a,u,c,f,s,l=-1,h=a.length,d=t.value;++l1?n:1)},e}(vh);function bh(t,n,e){return(n[0]-t[0])*(e[1]-t[1])-(n[1]-t[1])*(e[0]-t[0])}function mh(t,n){return t[0]-n[0]||t[1]-n[1]}function xh(t){for(var n=t.length,e=[0,1],r=2,i=2;i1&&bh(t[e[r-2]],t[e[r-1]],t[i])<=0;)--r;e[r++]=i}return e.slice(0,r)}function wh(){return Math.random()}var Mh=function t(n){function e(t,e){return t=null==t?0:+t,e=null==e?1:+e,1===arguments.length?(e=t,t=0):e-=t,function(){return n()*e+t}}return e.source=t,e}(wh),Nh=function t(n){function e(t,e){var r,i;return t=null==t?0:+t,e=null==e?1:+e,function(){var o;if(null!=r)o=r,r=null;else do{r=2*n()-1,o=2*n()-1,i=r*r+o*o}while(!i||i>1);return t+e*o*Math.sqrt(-2*Math.log(i)/i)}}return e.source=t,e}(wh),Th=function t(n){function e(){var t=Nh.source(n).apply(this,arguments);return function(){return Math.exp(t())}}return e.source=t,e}(wh),Ah=function t(n){function e(t){return function(){for(var e=0,r=0;rr&&(n=e,e=r,r=n),function(t){return Math.max(e,Math.min(r,t))}}function Ih(t,n,e){var r=t[0],i=t[1],o=n[0],a=n[1];return i2?Hh:Ih,i=o=null,l}function l(n){return isNaN(n=+n)?e:(i||(i=r(a.map(t),u,c)))(t(f(n)))}return l.invert=function(e){return f(n((o||(o=r(u,a.map(t),me)))(e)))},l.domain=function(t){return arguments.length?(a=zh.call(t,Uh),f===Bh||(f=Yh(a)),s()):a.slice()},l.range=function(t){return arguments.length?(u=Rh.call(t),s()):u.slice()},l.rangeRound=function(t){return u=Rh.call(t),c=Ae,s()},l.clamp=function(t){return arguments.length?(f=t?Yh(a):Bh,l):f!==Bh},l.interpolate=function(t){return arguments.length?(c=t,s()):c},l.unknown=function(t){return arguments.length?(e=t,l):e},function(e,r){return t=e,n=r,s()}}function Vh(t,n){return Xh()(t,n)}function Gh(n,e,r,i){var 
o,a=w(n,e,r);switch((i=Oa(null==i?",f":i)).type){case"s":var u=Math.max(Math.abs(n),Math.abs(e));return null!=i.precision||isNaN(o=Wa(a,u))||(i.precision=o),t.formatPrefix(i,u);case"":case"e":case"g":case"p":case"r":null!=i.precision||isNaN(o=Za(a,Math.max(Math.abs(n),Math.abs(e))))||(i.precision=o-("e"===i.type));break;case"f":case"%":null!=i.precision||isNaN(o=$a(a))||(i.precision=o-2*("%"===i.type))}return t.format(i)}function $h(t){var n=t.domain;return t.ticks=function(t){var e=n();return m(e[0],e[e.length-1],null==t?10:t)},t.tickFormat=function(t,e){var r=n();return Gh(r[0],r[r.length-1],null==t?10:t,e)},t.nice=function(e){null==e&&(e=10);var r,i=n(),o=0,a=i.length-1,u=i[o],c=i[a];return c0?r=x(u=Math.floor(u/r)*r,c=Math.ceil(c/r)*r,e):r<0&&(r=x(u=Math.ceil(u*r)/r,c=Math.floor(c*r)/r,e)),r>0?(i[o]=Math.floor(u/r)*r,i[a]=Math.ceil(c/r)*r,n(i)):r<0&&(i[o]=Math.ceil(u*r)/r,i[a]=Math.floor(c*r)/r,n(i)),t},t}function Wh(t,n){var e,r=0,i=(t=t.slice()).length-1,o=t[r],a=t[i];return a0){for(;hc)break;v.push(l)}}else for(;h=1;--s)if(!((l=f*s)c)break;v.push(l)}}else v=m(h,d,Math.min(d-h,p)).map(r);return n?v.reverse():v},i.tickFormat=function(n,o){if(null==o&&(o=10===a?".0e":","),"function"!=typeof o&&(o=t.format(o)),n===1/0)return o;null==n&&(n=10);var u=Math.max(1,a*n/i.ticks().length);return function(t){var n=t/r(Math.round(e(t)));return n*a0))return u;do{u.push(a=new Date(+e)),n(e,o),t(e)}while(a=n)for(;t(n),!e(n);)n.setTime(n-1)},function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;n(t,-1),!e(t););else for(;--r>=0;)for(;n(t,1),!e(t););})},e&&(i.count=function(n,r){return ld.setTime(+n),hd.setTime(+r),t(ld),t(hd),Math.floor(e(ld,hd))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(n){return r(n)%t==0}:function(n){return i.count(0,n)%t==0}):i:null}),i}var pd=dd(function(){},function(t,n){t.setTime(+t+n)},function(t,n){return n-t});pd.every=function(t){return 
t=Math.floor(t),isFinite(t)&&t>0?t>1?dd(function(n){n.setTime(Math.floor(n/t)*t)},function(n,e){n.setTime(+n+e*t)},function(n,e){return(e-n)/t}):pd:null};var vd=pd.range,gd=6e4,yd=6048e5,_d=dd(function(t){t.setTime(t-t.getMilliseconds())},function(t,n){t.setTime(+t+1e3*n)},function(t,n){return(n-t)/1e3},function(t){return t.getUTCSeconds()}),bd=_d.range,md=dd(function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds())},function(t,n){t.setTime(+t+n*gd)},function(t,n){return(n-t)/gd},function(t){return t.getMinutes()}),xd=md.range,wd=dd(function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds()-t.getMinutes()*gd)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getHours()}),Md=wd.range,Nd=dd(function(t){t.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*gd)/864e5},function(t){return t.getDate()-1}),Td=Nd.range;function Ad(t){return dd(function(n){n.setDate(n.getDate()-(n.getDay()+7-t)%7),n.setHours(0,0,0,0)},function(t,n){t.setDate(t.getDate()+7*n)},function(t,n){return(n-t-(n.getTimezoneOffset()-t.getTimezoneOffset())*gd)/yd})}var Sd=Ad(0),kd=Ad(1),Ed=Ad(2),Cd=Ad(3),Pd=Ad(4),zd=Ad(5),Rd=Ad(6),Dd=Sd.range,qd=kd.range,Ld=Ed.range,Ud=Cd.range,Od=Pd.range,Bd=zd.range,Fd=Rd.range,Yd=dd(function(t){t.setDate(1),t.setHours(0,0,0,0)},function(t,n){t.setMonth(t.getMonth()+n)},function(t,n){return n.getMonth()-t.getMonth()+12*(n.getFullYear()-t.getFullYear())},function(t){return t.getMonth()}),Id=Yd.range,Hd=dd(function(t){t.setMonth(0,1),t.setHours(0,0,0,0)},function(t,n){t.setFullYear(t.getFullYear()+n)},function(t,n){return n.getFullYear()-t.getFullYear()},function(t){return t.getFullYear()});Hd.every=function(t){return isFinite(t=Math.floor(t))&&t>0?dd(function(n){n.setFullYear(Math.floor(n.getFullYear()/t)*t),n.setMonth(0,1),n.setHours(0,0,0,0)},function(n,e){n.setFullYear(n.getFullYear()+e*t)}):null};var 
jd=Hd.range,Xd=dd(function(t){t.setUTCSeconds(0,0)},function(t,n){t.setTime(+t+n*gd)},function(t,n){return(n-t)/gd},function(t){return t.getUTCMinutes()}),Vd=Xd.range,Gd=dd(function(t){t.setUTCMinutes(0,0,0)},function(t,n){t.setTime(+t+36e5*n)},function(t,n){return(n-t)/36e5},function(t){return t.getUTCHours()}),$d=Gd.range,Wd=dd(function(t){t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+n)},function(t,n){return(n-t)/864e5},function(t){return t.getUTCDate()-1}),Zd=Wd.range;function Qd(t){return dd(function(n){n.setUTCDate(n.getUTCDate()-(n.getUTCDay()+7-t)%7),n.setUTCHours(0,0,0,0)},function(t,n){t.setUTCDate(t.getUTCDate()+7*n)},function(t,n){return(n-t)/yd})}var Kd=Qd(0),Jd=Qd(1),tp=Qd(2),np=Qd(3),ep=Qd(4),rp=Qd(5),ip=Qd(6),op=Kd.range,ap=Jd.range,up=tp.range,cp=np.range,fp=ep.range,sp=rp.range,lp=ip.range,hp=dd(function(t){t.setUTCDate(1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCMonth(t.getUTCMonth()+n)},function(t,n){return n.getUTCMonth()-t.getUTCMonth()+12*(n.getUTCFullYear()-t.getUTCFullYear())},function(t){return t.getUTCMonth()}),dp=hp.range,pp=dd(function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)},function(t,n){t.setUTCFullYear(t.getUTCFullYear()+n)},function(t,n){return n.getUTCFullYear()-t.getUTCFullYear()},function(t){return t.getUTCFullYear()});pp.every=function(t){return isFinite(t=Math.floor(t))&&t>0?dd(function(n){n.setUTCFullYear(Math.floor(n.getUTCFullYear()/t)*t),n.setUTCMonth(0,1),n.setUTCHours(0,0,0,0)},function(n,e){n.setUTCFullYear(n.getUTCFullYear()+e*t)}):null};var vp=pp.range;function gp(t){if(0<=t.y&&t.y<100){var n=new Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return n.setFullYear(t.y),n}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function yp(t){if(0<=t.y&&t.y<100){var n=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return n.setUTCFullYear(t.y),n}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function _p(t,n,e){return{y:t,m:n,d:e,H:0,M:0,S:0,L:0}}function bp(t){var 
n=t.dateTime,e=t.date,r=t.time,i=t.periods,o=t.days,a=t.shortDays,u=t.months,c=t.shortMonths,f=Sp(i),s=kp(i),l=Sp(o),h=kp(o),d=Sp(a),p=kp(a),v=Sp(u),g=kp(u),y=Sp(c),_=kp(c),b={a:function(t){return a[t.getDay()]},A:function(t){return o[t.getDay()]},b:function(t){return c[t.getMonth()]},B:function(t){return u[t.getMonth()]},c:null,d:Wp,e:Wp,f:tv,H:Zp,I:Qp,j:Kp,L:Jp,m:nv,M:ev,p:function(t){return i[+(t.getHours()>=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:Cv,s:Pv,S:rv,u:iv,U:ov,V:av,w:uv,W:cv,x:null,X:null,y:fv,Y:sv,Z:lv,"%":Ev},m={a:function(t){return a[t.getUTCDay()]},A:function(t){return o[t.getUTCDay()]},b:function(t){return c[t.getUTCMonth()]},B:function(t){return u[t.getUTCMonth()]},c:null,d:hv,e:hv,f:yv,H:dv,I:pv,j:vv,L:gv,m:_v,M:bv,p:function(t){return i[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:Cv,s:Pv,S:mv,u:xv,U:wv,V:Mv,w:Nv,W:Tv,x:null,X:null,y:Av,Y:Sv,Z:kv,"%":Ev},x={a:function(t,n,e){var r=d.exec(n.slice(e));return r?(t.w=p[r[0].toLowerCase()],e+r[0].length):-1},A:function(t,n,e){var r=l.exec(n.slice(e));return r?(t.w=h[r[0].toLowerCase()],e+r[0].length):-1},b:function(t,n,e){var r=y.exec(n.slice(e));return r?(t.m=_[r[0].toLowerCase()],e+r[0].length):-1},B:function(t,n,e){var r=v.exec(n.slice(e));return r?(t.m=g[r[0].toLowerCase()],e+r[0].length):-1},c:function(t,e,r){return N(t,n,e,r)},d:Bp,e:Bp,f:Xp,H:Yp,I:Yp,j:Fp,L:jp,m:Op,M:Ip,p:function(t,n,e){var r=f.exec(n.slice(e));return r?(t.p=s[r[0].toLowerCase()],e+r[0].length):-1},q:Up,Q:Gp,s:$p,S:Hp,u:Cp,U:Pp,V:zp,w:Ep,W:Rp,x:function(t,n,r){return N(t,e,n,r)},X:function(t,n,e){return N(t,r,n,e)},y:qp,Y:Dp,Z:Lp,"%":Vp};function w(t,n){return function(e){var r,i,o,a=[],u=-1,c=0,f=t.length;for(e instanceof Date||(e=new Date(+e));++u53)return null;"w"in o||(o.w=1),"Z"in 
o?(i=(r=yp(_p(o.y,0,1))).getUTCDay(),r=i>4||0===i?Jd.ceil(r):Jd(r),r=Wd.offset(r,7*(o.V-1)),o.y=r.getUTCFullYear(),o.m=r.getUTCMonth(),o.d=r.getUTCDate()+(o.w+6)%7):(i=(r=gp(_p(o.y,0,1))).getDay(),r=i>4||0===i?kd.ceil(r):kd(r),r=Nd.offset(r,7*(o.V-1)),o.y=r.getFullYear(),o.m=r.getMonth(),o.d=r.getDate()+(o.w+6)%7)}else("W"in o||"U"in o)&&("w"in o||(o.w="u"in o?o.u%7:"W"in o?1:0),i="Z"in o?yp(_p(o.y,0,1)).getUTCDay():gp(_p(o.y,0,1)).getDay(),o.m=0,o.d="W"in o?(o.w+6)%7+7*o.W-(i+5)%7:o.w+7*o.U-(i+6)%7);return"Z"in o?(o.H+=o.Z/100|0,o.M+=o.Z%100,yp(o)):gp(o)}}function N(t,n,e,r){for(var i,o,a=0,u=n.length,c=e.length;a=c)return-1;if(37===(i=n.charCodeAt(a++))){if(i=n.charAt(a++),!(o=x[i in xp?n.charAt(a++):i])||(r=o(t,e,r))<0)return-1}else if(i!=e.charCodeAt(r++))return-1}return r}return b.x=w(e,b),b.X=w(r,b),b.c=w(n,b),m.x=w(e,m),m.X=w(r,m),m.c=w(n,m),{format:function(t){var n=w(t+="",b);return n.toString=function(){return t},n},parse:function(t){var n=M(t+="",!1);return n.toString=function(){return t},n},utcFormat:function(t){var n=w(t+="",m);return n.toString=function(){return t},n},utcParse:function(t){var n=M(t+="",!0);return n.toString=function(){return t},n}}}var mp,xp={"-":"",_:" ",0:"0"},wp=/^\s*\d+/,Mp=/^%/,Np=/[\\^$*+?|[\]().{}]/g;function Tp(t,n,e){var r=t<0?"-":"",i=(r?-t:t)+"",o=i.length;return r+(o68?1900:2e3),e+r[0].length):-1}function Lp(t,n,e){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(n.slice(e,e+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),e+r[0].length):-1}function Up(t,n,e){var r=wp.exec(n.slice(e,e+1));return r?(t.q=3*r[0]-3,e+r[0].length):-1}function Op(t,n,e){var r=wp.exec(n.slice(e,e+2));return r?(t.m=r[0]-1,e+r[0].length):-1}function Bp(t,n,e){var r=wp.exec(n.slice(e,e+2));return r?(t.d=+r[0],e+r[0].length):-1}function Fp(t,n,e){var r=wp.exec(n.slice(e,e+3));return r?(t.m=0,t.d=+r[0],e+r[0].length):-1}function Yp(t,n,e){var r=wp.exec(n.slice(e,e+2));return r?(t.H=+r[0],e+r[0].length):-1}function Ip(t,n,e){var r=wp.exec(n.slice(e,e+2));return 
r?(t.M=+r[0],e+r[0].length):-1}function Hp(t,n,e){var r=wp.exec(n.slice(e,e+2));return r?(t.S=+r[0],e+r[0].length):-1}function jp(t,n,e){var r=wp.exec(n.slice(e,e+3));return r?(t.L=+r[0],e+r[0].length):-1}function Xp(t,n,e){var r=wp.exec(n.slice(e,e+6));return r?(t.L=Math.floor(r[0]/1e3),e+r[0].length):-1}function Vp(t,n,e){var r=Mp.exec(n.slice(e,e+1));return r?e+r[0].length:-1}function Gp(t,n,e){var r=wp.exec(n.slice(e));return r?(t.Q=+r[0],e+r[0].length):-1}function $p(t,n,e){var r=wp.exec(n.slice(e));return r?(t.s=+r[0],e+r[0].length):-1}function Wp(t,n){return Tp(t.getDate(),n,2)}function Zp(t,n){return Tp(t.getHours(),n,2)}function Qp(t,n){return Tp(t.getHours()%12||12,n,2)}function Kp(t,n){return Tp(1+Nd.count(Hd(t),t),n,3)}function Jp(t,n){return Tp(t.getMilliseconds(),n,3)}function tv(t,n){return Jp(t,n)+"000"}function nv(t,n){return Tp(t.getMonth()+1,n,2)}function ev(t,n){return Tp(t.getMinutes(),n,2)}function rv(t,n){return Tp(t.getSeconds(),n,2)}function iv(t){var n=t.getDay();return 0===n?7:n}function ov(t,n){return Tp(Sd.count(Hd(t)-1,t),n,2)}function av(t,n){var e=t.getDay();return t=e>=4||0===e?Pd(t):Pd.ceil(t),Tp(Pd.count(Hd(t),t)+(4===Hd(t).getDay()),n,2)}function uv(t){return t.getDay()}function cv(t,n){return Tp(kd.count(Hd(t)-1,t),n,2)}function fv(t,n){return Tp(t.getFullYear()%100,n,2)}function sv(t,n){return Tp(t.getFullYear()%1e4,n,4)}function lv(t){var n=t.getTimezoneOffset();return(n>0?"-":(n*=-1,"+"))+Tp(n/60|0,"0",2)+Tp(n%60,"0",2)}function hv(t,n){return Tp(t.getUTCDate(),n,2)}function dv(t,n){return Tp(t.getUTCHours(),n,2)}function pv(t,n){return Tp(t.getUTCHours()%12||12,n,2)}function vv(t,n){return Tp(1+Wd.count(pp(t),t),n,3)}function gv(t,n){return Tp(t.getUTCMilliseconds(),n,3)}function yv(t,n){return gv(t,n)+"000"}function _v(t,n){return Tp(t.getUTCMonth()+1,n,2)}function bv(t,n){return Tp(t.getUTCMinutes(),n,2)}function mv(t,n){return Tp(t.getUTCSeconds(),n,2)}function xv(t){var n=t.getUTCDay();return 0===n?7:n}function 
wv(t,n){return Tp(Kd.count(pp(t)-1,t),n,2)}function Mv(t,n){var e=t.getUTCDay();return t=e>=4||0===e?ep(t):ep.ceil(t),Tp(ep.count(pp(t),t)+(4===pp(t).getUTCDay()),n,2)}function Nv(t){return t.getUTCDay()}function Tv(t,n){return Tp(Jd.count(pp(t)-1,t),n,2)}function Av(t,n){return Tp(t.getUTCFullYear()%100,n,2)}function Sv(t,n){return Tp(t.getUTCFullYear()%1e4,n,4)}function kv(){return"+0000"}function Ev(){return"%"}function Cv(t){return+t}function Pv(t){return Math.floor(+t/1e3)}function zv(n){return mp=bp(n),t.timeFormat=mp.format,t.timeParse=mp.parse,t.utcFormat=mp.utcFormat,t.utcParse=mp.utcParse,mp}zv({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});var Rv=Date.prototype.toISOString?function(t){return t.toISOString()}:t.utcFormat("%Y-%m-%dT%H:%M:%S.%LZ");var Dv=+new Date("2000-01-01T00:00:00.000Z")?function(t){var n=new Date(t);return isNaN(n)?null:n}:t.utcParse("%Y-%m-%dT%H:%M:%S.%LZ"),qv=1e3,Lv=60*qv,Uv=60*Lv,Ov=24*Uv,Bv=7*Ov,Fv=30*Ov,Yv=365*Ov;function Iv(t){return new Date(t)}function Hv(t){return t instanceof Date?+t:+new Date(+t)}function jv(t,n,r,i,o,a,u,c,f){var s=Vh(Bh,Bh),l=s.invert,h=s.domain,d=f(".%L"),p=f(":%S"),v=f("%I:%M"),g=f("%I %p"),y=f("%a %d"),_=f("%b %d"),b=f("%B"),m=f("%Y"),x=[[u,1,qv],[u,5,5*qv],[u,15,15*qv],[u,30,30*qv],[a,1,Lv],[a,5,5*Lv],[a,15,15*Lv],[a,30,30*Lv],[o,1,Uv],[o,3,3*Uv],[o,6,6*Uv],[o,12,12*Uv],[i,1,Ov],[i,2,2*Ov],[r,1,Bv],[n,1,Fv],[n,3,3*Fv],[t,1,Yv]];function M(e){return(u(e)=1?Cy:t<=-1?-Cy:Math.asin(t)}function Ry(t){return t.innerRadius}function Dy(t){return t.outerRadius}function qy(t){return t.startAngle}function Ly(t){return t.endAngle}function Uy(t){return 
t&&t.padAngle}function Oy(t,n,e,r,i,o,a){var u=t-e,c=n-r,f=(a?o:-o)/Sy(u*u+c*c),s=f*c,l=-f*u,h=t+s,d=n+l,p=e+s,v=r+l,g=(h+p)/2,y=(d+v)/2,_=p-h,b=v-d,m=_*_+b*b,x=i-o,w=h*v-p*d,M=(b<0?-1:1)*Sy(Ny(0,x*x*m-w*w)),N=(w*b-_*M)/m,T=(-w*_-b*M)/m,A=(w*b+_*M)/m,S=(-w*_+b*M)/m,k=N-g,E=T-y,C=A-g,P=S-y;return k*k+E*E>C*C+P*P&&(N=A,T=S),{cx:N,cy:T,x01:-s,y01:-l,x11:N*(i/x-1),y11:T*(i/x-1)}}function By(t){this._context=t}function Fy(t){return new By(t)}function Yy(t){return t[0]}function Iy(t){return t[1]}function Hy(){var t=Yy,n=Iy,e=my(!0),r=null,i=Fy,o=null;function a(a){var u,c,f,s=a.length,l=!1;for(null==r&&(o=i(f=no())),u=0;u<=s;++u)!(u=s;--l)u.point(g[l],y[l]);u.lineEnd(),u.areaEnd()}v&&(g[f]=+t(h,f,c),y[f]=+e(h,f,c),u.point(n?+n(h,f,c):g[f],r?+r(h,f,c):y[f]))}if(d)return u=null,d+""||null}function f(){return Hy().defined(i).curve(a).context(o)}return c.x=function(e){return arguments.length?(t="function"==typeof e?e:my(+e),n=null,c):t},c.x0=function(n){return arguments.length?(t="function"==typeof n?n:my(+n),c):t},c.x1=function(t){return arguments.length?(n=null==t?null:"function"==typeof t?t:my(+t),c):n},c.y=function(t){return arguments.length?(e="function"==typeof t?t:my(+t),r=null,c):e},c.y0=function(t){return arguments.length?(e="function"==typeof t?t:my(+t),c):e},c.y1=function(t){return arguments.length?(r=null==t?null:"function"==typeof t?t:my(+t),c):r},c.lineX0=c.lineY0=function(){return f().x(t).y(e)},c.lineY1=function(){return f().x(t).y(r)},c.lineX1=function(){return f().x(n).y(e)},c.defined=function(t){return arguments.length?(i="function"==typeof t?t:my(!!t),c):i},c.curve=function(t){return arguments.length?(a=t,null!=o&&(u=a(o)),c):a},c.context=function(t){return arguments.length?(null==t?o=u=null:u=a(o=t),c):o},c}function Xy(t,n){return nt?1:n>=t?0:NaN}function Vy(t){return 
t}By.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:this._context.lineTo(t,n)}}};var Gy=Wy(Fy);function $y(t){this._curve=t}function Wy(t){function n(n){return new $y(t(n))}return n._curve=t,n}function Zy(t){var n=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?n(Wy(t)):n()._curve},t}function Qy(){return Zy(Hy().curve(Gy))}function Ky(){var t=jy().curve(Gy),n=t.curve,e=t.lineX0,r=t.lineX1,i=t.lineY0,o=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return Zy(e())},delete t.lineX0,t.lineEndAngle=function(){return Zy(r())},delete t.lineX1,t.lineInnerRadius=function(){return Zy(i())},delete t.lineY0,t.lineOuterRadius=function(){return Zy(o())},delete t.lineY1,t.curve=function(t){return arguments.length?n(Wy(t)):n()._curve},t}function Jy(t,n){return[(n=+n)*Math.cos(t-=Math.PI/2),n*Math.sin(t)]}$y.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,n){this._curve.point(n*Math.sin(t),n*-Math.cos(t))}};var t_=Array.prototype.slice;function n_(t){return t.source}function e_(t){return t.target}function r_(t){var n=n_,e=e_,r=Yy,i=Iy,o=null;function a(){var a,u=t_.call(arguments),c=n.apply(this,u),f=e.apply(this,u);if(o||(o=a=no()),t(o,+r.apply(this,(u[0]=c,u)),+i.apply(this,u),+r.apply(this,(u[0]=f,u)),+i.apply(this,u)),a)return o=null,a+""||null}return 
a.source=function(t){return arguments.length?(n=t,a):n},a.target=function(t){return arguments.length?(e=t,a):e},a.x=function(t){return arguments.length?(r="function"==typeof t?t:my(+t),a):r},a.y=function(t){return arguments.length?(i="function"==typeof t?t:my(+t),a):i},a.context=function(t){return arguments.length?(o=null==t?null:t,a):o},a}function i_(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n=(n+r)/2,e,n,i,r,i)}function o_(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n,e=(e+i)/2,r,e,r,i)}function a_(t,n,e,r,i){var o=Jy(n,e),a=Jy(n,e=(e+i)/2),u=Jy(r,e),c=Jy(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(a[0],a[1],u[0],u[1],c[0],c[1])}var u_={draw:function(t,n){var e=Math.sqrt(n/Ey);t.moveTo(e,0),t.arc(0,0,e,0,Py)}},c_={draw:function(t,n){var e=Math.sqrt(n/5)/2;t.moveTo(-3*e,-e),t.lineTo(-e,-e),t.lineTo(-e,-3*e),t.lineTo(e,-3*e),t.lineTo(e,-e),t.lineTo(3*e,-e),t.lineTo(3*e,e),t.lineTo(e,e),t.lineTo(e,3*e),t.lineTo(-e,3*e),t.lineTo(-e,e),t.lineTo(-3*e,e),t.closePath()}},f_=Math.sqrt(1/3),s_=2*f_,l_={draw:function(t,n){var e=Math.sqrt(n/s_),r=e*f_;t.moveTo(0,-e),t.lineTo(r,0),t.lineTo(0,e),t.lineTo(-r,0),t.closePath()}},h_=Math.sin(Ey/10)/Math.sin(7*Ey/10),d_=Math.sin(Py/10)*h_,p_=-Math.cos(Py/10)*h_,v_={draw:function(t,n){var e=Math.sqrt(.8908130915292852*n),r=d_*e,i=p_*e;t.moveTo(0,-e),t.lineTo(r,i);for(var o=1;o<5;++o){var a=Py*o/5,u=Math.cos(a),c=Math.sin(a);t.lineTo(c*e,-u*e),t.lineTo(u*r-c*i,c*r+u*i)}t.closePath()}},g_={draw:function(t,n){var e=Math.sqrt(n),r=-e/2;t.rect(r,r,e,e)}},y_=Math.sqrt(3),__={draw:function(t,n){var e=-Math.sqrt(n/(3*y_));t.moveTo(0,2*e),t.lineTo(-y_*e,-e),t.lineTo(y_*e,-e),t.closePath()}},b_=Math.sqrt(3)/2,m_=1/Math.sqrt(12),x_=3*(m_/2+1),w_={draw:function(t,n){var 
e=Math.sqrt(n/x_),r=e/2,i=e*m_,o=r,a=e*m_+e,u=-o,c=a;t.moveTo(r,i),t.lineTo(o,a),t.lineTo(u,c),t.lineTo(-.5*r-b_*i,b_*r+-.5*i),t.lineTo(-.5*o-b_*a,b_*o+-.5*a),t.lineTo(-.5*u-b_*c,b_*u+-.5*c),t.lineTo(-.5*r+b_*i,-.5*i-b_*r),t.lineTo(-.5*o+b_*a,-.5*a-b_*o),t.lineTo(-.5*u+b_*c,-.5*c-b_*u),t.closePath()}},M_=[u_,c_,l_,g_,v_,__,w_];function N_(){}function T_(t,n,e){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+n)/6,(t._y0+4*t._y1+e)/6)}function A_(t){this._context=t}function S_(t){this._context=t}function k_(t){this._context=t}function E_(t,n){this._basis=new A_(t),this._beta=n}A_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:T_(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:T_(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},S_.prototype={areaStart:N_,areaEnd:N_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 
0:this._point=1,this._x2=t,this._y2=n;break;case 1:this._point=2,this._x3=t,this._y3=n;break;case 2:this._point=3,this._x4=t,this._y4=n,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+n)/6);break;default:T_(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},k_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var e=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+n)/6;this._line?this._context.lineTo(e,r):this._context.moveTo(e,r);break;case 3:this._point=4;default:T_(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},E_.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,n=this._y,e=t.length-1;if(e>0)for(var r,i=t[0],o=n[0],a=t[e]-i,u=n[e]-o,c=-1;++c<=e;)r=c/e,this._basis.point(this._beta*t[c]+(1-this._beta)*(i+r*a),this._beta*n[c]+(1-this._beta)*(o+r*u));this._x=this._y=null,this._basis.lineEnd()},point:function(t,n){this._x.push(+t),this._y.push(+n)}};var C_=function t(n){function e(t){return 1===n?new A_(t):new E_(t,n)}return e.beta=function(n){return t(+n)},e}(.85);function P_(t,n,e){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-n),t._y2+t._k*(t._y1-e),t._x2,t._y2)}function z_(t,n){this._context=t,this._k=(1-n)/6}z_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 
3:P_(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2,this._x1=t,this._y1=n;break;case 2:this._point=3;default:P_(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var R_=function t(n){function e(t){return new z_(t,n)}return e.tension=function(n){return t(+n)},e}(0);function D_(t,n){this._context=t,this._k=(1-n)/6}D_.prototype={areaStart:N_,areaEnd:N_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:P_(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var q_=function t(n){function e(t){return new D_(t,n)}return e.tension=function(n){return t(+n)},e}(0);function L_(t,n){this._context=t,this._k=(1-n)/6}L_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 
2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:P_(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var U_=function t(n){function e(t){return new L_(t,n)}return e.tension=function(n){return t(+n)},e}(0);function O_(t,n,e){var r=t._x1,i=t._y1,o=t._x2,a=t._y2;if(t._l01_a>ky){var u=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*u-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*u-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>ky){var f=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,s=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*f+t._x1*t._l23_2a-n*t._l12_2a)/s,a=(a*f+t._y1*t._l23_2a-e*t._l12_2a)/s}t._context.bezierCurveTo(r,i,o,a,t._x2,t._y2)}function B_(t,n){this._context=t,this._alpha=n}B_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 2:this._point=3;default:O_(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var F_=function t(n){function e(t){return n?new B_(t,n):new z_(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function 
Y_(t,n){this._context=t,this._alpha=n}Y_.prototype={areaStart:N_,areaEnd:N_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:O_(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var I_=function t(n){function e(t){return n?new Y_(t,n):new D_(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function H_(t,n){this._context=t,this._alpha=n}H_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 
2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:O_(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var j_=function t(n){function e(t){return n?new H_(t,n):new L_(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function X_(t){this._context=t}function V_(t){return t<0?-1:1}function G_(t,n,e){var r=t._x1-t._x0,i=n-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),a=(e-t._y1)/(i||r<0&&-0),u=(o*i+a*r)/(r+i);return(V_(o)+V_(a))*Math.min(Math.abs(o),Math.abs(a),.5*Math.abs(u))||0}function $_(t,n){var e=t._x1-t._x0;return e?(3*(t._y1-t._y0)/e-n)/2:n}function W_(t,n,e){var r=t._x0,i=t._y0,o=t._x1,a=t._y1,u=(o-r)/3;t._context.bezierCurveTo(r+u,i+u*n,o-u,a-u*e,o,a)}function Z_(t){this._context=t}function Q_(t){this._context=new K_(t)}function K_(t){this._context=t}function J_(t){this._context=t}function tb(t){var n,e,r=t.length-1,i=new Array(r),o=new Array(r),a=new Array(r);for(i[0]=0,o[0]=2,a[0]=t[0]+2*t[1],n=1;n=0;--n)i[n]=(a[n]-i[n+1])/o[n];for(o[r-1]=(t[r]+i[r-1])/2,n=0;n1)for(var e,r,i,o=1,a=t[n[0]],u=a.length;o=0;)e[n]=n;return e}function ib(t,n){return t[n]}function ob(t){var n=t.map(ab);return rb(t).sort(function(t,e){return n[t]-n[e]})}function ab(t){for(var n,e=-1,r=0,i=t.length,o=-1/0;++eo&&(o=n,r=e);return r}function ub(t){var n=t.map(cb);return rb(t).sort(function(t,e){return n[t]-n[e]})}function cb(t){for(var n,e=0,r=-1,i=t.length;++r0)){if(o/=h,h<0){if(o0){if(o>l)return;o>s&&(s=o)}if(o=r-c,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>s&&(s=o)}else if(h>0){if(o0)){if(o/=d,d<0){if(o0){if(o>l)return;o>s&&(s=o)}if(o=i-f,d||!(o<0)){if(o/=d,d<0){if(o>l)return;o>s&&(s=o)}else if(d>0){if(o0||l<1)||(s>0&&(t[0]=[c+s*h,f+s*d]),l<1&&(t[1]=[c+l*h,f+l*d]),!0)}}}}}function xb(t,n,e,r,i){var o=t[1];if(o)return!0;var 
a,u,c=t[0],f=t.left,s=t.right,l=f[0],h=f[1],d=s[0],p=s[1],v=(l+d)/2,g=(h+p)/2;if(p===h){if(v=r)return;if(l>d){if(c){if(c[1]>=i)return}else c=[v,e];o=[v,i]}else{if(c){if(c[1]1)if(l>d){if(c){if(c[1]>=i)return}else c=[(e-u)/a,e];o=[(i-u)/a,i]}else{if(c){if(c[1]=r)return}else c=[n,a*n+u];o=[r,a*r+u]}else{if(c){if(c[0]=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,n),this._context.lineTo(t,n);else{var e=this._x*(1-this._t)+t*this._t;this._context.lineTo(e,this._y),this._context.lineTo(e,n)}}this._x=t,this._y=n}},hb.prototype={constructor:hb,insert:function(t,n){var e,r,i;if(t){if(n.P=t,n.N=t.N,t.N&&(t.N.P=n),t.N=n,t.R){for(t=t.R;t.L;)t=t.L;t.L=n}else t.R=n;e=t}else this._?(t=gb(this._),n.P=null,n.N=t,t.P=t.L=n,e=t):(n.P=n.N=null,this._=n,e=null);for(n.L=n.R=null,n.U=e,n.C=!0,t=n;e&&e.C;)e===(r=e.U).L?(i=r.R)&&i.C?(e.C=i.C=!1,r.C=!0,t=r):(t===e.R&&(pb(this,e),e=(t=e).U),e.C=!1,r.C=!0,vb(this,r)):(i=r.L)&&i.C?(e.C=i.C=!1,r.C=!0,t=r):(t===e.L&&(vb(this,e),e=(t=e).U),e.C=!1,r.C=!0,pb(this,r)),e=t.U;this._.C=!1},remove:function(t){t.N&&(t.N.P=t.P),t.P&&(t.P.N=t.N),t.N=t.P=null;var n,e,r,i=t.U,o=t.L,a=t.R;if(e=o?a?gb(a):o:a,i?i.L===t?i.L=e:i.R=e:this._=e,o&&a?(r=e.C,e.C=t.C,e.L=o,o.U=e,e!==a?(i=e.U,e.U=t.U,t=e.R,i.L=t,e.R=a,a.U=e):(e.U=i,i=e,t=e.R)):(r=t.C,t=e),t&&(t.U=i),!r)if(t&&t.C)t.C=!1;else{do{if(t===this._)break;if(t===i.L){if((n=i.R).C&&(n.C=!1,i.C=!0,pb(this,i),n=i.R),n.L&&n.L.C||n.R&&n.R.C){n.R&&n.R.C||(n.L.C=!1,n.C=!0,vb(this,n),n=i.R),n.C=i.C,i.C=n.R.C=!1,pb(this,i),t=this._;break}}else if((n=i.L).C&&(n.C=!1,i.C=!0,vb(this,i),n=i.L),n.L&&n.L.C||n.R&&n.R.C){n.L&&n.L.C||(n.R.C=!1,n.C=!0,pb(this,n),n=i.L),n.C=i.C,i.C=n.L.C=!1,vb(this,i),t=this._;break}n.C=!0,t=i,i=i.U}while(!t.C);t&&(t.C=!1)}}};var Tb,Ab=[];function 
Sb(){db(this),this.x=this.y=this.arc=this.site=this.cy=null}function kb(t){var n=t.P,e=t.N;if(n&&e){var r=n.site,i=t.site,o=e.site;if(r!==o){var a=i[0],u=i[1],c=r[0]-a,f=r[1]-u,s=o[0]-a,l=o[1]-u,h=2*(c*l-f*s);if(!(h>=-Hb)){var d=c*c+f*f,p=s*s+l*l,v=(l*d-f*p)/h,g=(c*p-s*d)/h,y=Ab.pop()||new Sb;y.arc=t,y.site=i,y.x=v+a,y.y=(y.cy=g+u)+Math.sqrt(v*v+g*g),t.circle=y;for(var _=null,b=Fb._;b;)if(y.yIb)u=u.L;else{if(!((i=o-Ub(u,a))>Ib)){r>-Ib?(n=u.P,e=u):i>-Ib?(n=u,e=u.N):n=e=u;break}if(!u.R){n=u;break}u=u.R}!function(t){Bb[t.index]={site:t,halfedges:[]}}(t);var c=zb(t);if(Ob.insert(n,c),n||e){if(n===e)return Eb(n),e=zb(n.site),Ob.insert(c,e),c.edge=e.edge=yb(n.site,c.site),kb(n),void kb(e);if(e){Eb(n),Eb(e);var f=n.site,s=f[0],l=f[1],h=t[0]-s,d=t[1]-l,p=e.site,v=p[0]-s,g=p[1]-l,y=2*(h*g-d*v),_=h*h+d*d,b=v*v+g*g,m=[(g*_-d*b)/y+s,(h*b-v*_)/y+l];bb(e.edge,f,p,m),c.edge=yb(f,t,null,m),e.edge=yb(t,p,null,m),kb(n),kb(e)}else c.edge=yb(n.site,c.site)}}function Lb(t,n){var e=t.site,r=e[0],i=e[1],o=i-n;if(!o)return r;var a=t.P;if(!a)return-1/0;var u=(e=a.site)[0],c=e[1],f=c-n;if(!f)return u;var s=u-r,l=1/o-1/f,h=s/f;return l?(-h+Math.sqrt(h*h-2*l*(s*s/(-2*f)-c+f/2+i-o/2)))/l+r:(r+u)/2}function Ub(t,n){var e=t.N;if(e)return Lb(e,n);var r=t.site;return r[1]===n?r[0]:1/0}var Ob,Bb,Fb,Yb,Ib=1e-6,Hb=1e-12;function jb(t,n,e){return(t[0]-e[0])*(n[1]-t[1])-(t[0]-n[0])*(e[1]-t[1])}function Xb(t,n){return n[1]-t[1]||n[0]-t[0]}function Vb(t,n){var e,r,i,o=t.sort(Xb).pop();for(Yb=[],Bb=new Array(t.length),Ob=new hb,Fb=new hb;;)if(i=Tb,o&&(!i||o[1]Ib||Math.abs(i[0][1]-i[1][1])>Ib)||delete Yb[o]}(a,u,c,f),function(t,n,e,r){var i,o,a,u,c,f,s,l,h,d,p,v,g=Bb.length,y=!0;for(i=0;iIb||Math.abs(v-h)>Ib)&&(c.splice(u,0,Yb.push(_b(a,d,Math.abs(p-t)Ib?[t,Math.abs(l-t)Ib?[Math.abs(h-r)Ib?[e,Math.abs(l-e)Ib?[Math.abs(h-n)=u)return null;var c=t-i.site[0],f=n-i.site[1],s=c*c+f*f;do{i=o.cells[r=a],a=null,i.halfedges.forEach(function(e){var r=o.edges[e],u=r.left;if(u!==i.site&&u||(u=r.right)){var 
c=t-u[0],f=n-u[1],l=c*c+f*f;lr?(r+i)/2:Math.min(0,r)||Math.max(0,i),a>o?(o+a)/2:Math.min(0,o)||Math.max(0,a))}Qb.prototype=Wb.prototype,t.FormatSpecifier=Ba,t.active=function(t,n){var e,r,i=t.__transition;if(i)for(r in n=null==n?null:n+"",i)if((e=i[r]).state>xr&&e.name===n)return new Ur([[t]],yi,n,+r);return null},t.arc=function(){var t=Ry,n=Dy,e=my(0),r=null,i=qy,o=Ly,a=Uy,u=null;function c(){var c,f,s=+t.apply(this,arguments),l=+n.apply(this,arguments),h=i.apply(this,arguments)-Cy,d=o.apply(this,arguments)-Cy,p=xy(d-h),v=d>h;if(u||(u=c=no()),lky)if(p>Py-ky)u.moveTo(l*My(h),l*Ay(h)),u.arc(0,0,l,h,d,!v),s>ky&&(u.moveTo(s*My(d),s*Ay(d)),u.arc(0,0,s,d,h,v));else{var g,y,_=h,b=d,m=h,x=d,w=p,M=p,N=a.apply(this,arguments)/2,T=N>ky&&(r?+r.apply(this,arguments):Sy(s*s+l*l)),A=Ty(xy(l-s)/2,+e.apply(this,arguments)),S=A,k=A;if(T>ky){var E=zy(T/s*Ay(N)),C=zy(T/l*Ay(N));(w-=2*E)>ky?(m+=E*=v?1:-1,x-=E):(w=0,m=x=(h+d)/2),(M-=2*C)>ky?(_+=C*=v?1:-1,b-=C):(M=0,_=b=(h+d)/2)}var P=l*My(_),z=l*Ay(_),R=s*My(x),D=s*Ay(x);if(A>ky){var q,L=l*My(b),U=l*Ay(b),O=s*My(m),B=s*Ay(m);if(p1?0:t<-1?Ey:Math.acos(t)}((F*I+Y*H)/(Sy(F*F+Y*Y)*Sy(I*I+H*H)))/2),X=Sy(q[0]*q[0]+q[1]*q[1]);S=Ty(A,(s-X)/(j-1)),k=Ty(A,(l-X)/(j+1))}}M>ky?k>ky?(g=Oy(O,B,P,z,l,k,v),y=Oy(L,U,R,D,l,k,v),u.moveTo(g.cx+g.x01,g.cy+g.y01),kky&&w>ky?S>ky?(g=Oy(R,D,L,U,s,-S,v),y=Oy(P,z,O,B,s,-S,v),u.lineTo(g.cx+g.x01,g.cy+g.y01),S>a,f=i+2*u>>a,s=bo(20);function l(r){var i=new Float32Array(c*f),l=new Float32Array(c*f);r.forEach(function(r,o,s){var l=+t(r,o,s)+u>>a,h=+n(r,o,s)+u>>a,d=+e(r,o,s);l>=0&&l=0&&h>a),So({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a),Ao({width:c,height:f,data:i},{width:c,height:f,data:l},o>>a),So({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a),Ao({width:c,height:f,data:i},{width:c,height:f,data:l},o>>a),So({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a);var d=s(i);if(!Array.isArray(d)){var p=T(i);d=w(0,p,d),(d=g(0,Math.floor(p/d)*d,d)).shift()}return 
To().thresholds(d).size([c,f])(i).map(h)}function h(t){return t.value*=Math.pow(2,-2*a),t.coordinates.forEach(d),t}function d(t){t.forEach(p)}function p(t){t.forEach(v)}function v(t){t[0]=t[0]*Math.pow(2,a)-u,t[1]=t[1]*Math.pow(2,a)-u}function y(){return c=r+2*(u=3*o)>>a,f=i+2*u>>a,l}return l.x=function(n){return arguments.length?(t="function"==typeof n?n:bo(+n),l):t},l.y=function(t){return arguments.length?(n="function"==typeof t?t:bo(+t),l):n},l.weight=function(t){return arguments.length?(e="function"==typeof t?t:bo(+t),l):e},l.size=function(t){if(!arguments.length)return[r,i];var n=Math.ceil(t[0]),e=Math.ceil(t[1]);if(!(n>=0||n>=0))throw new Error("invalid size");return r=n,i=e,y()},l.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return a=Math.floor(Math.log(t)/Math.LN2),y()},l.thresholds=function(t){return arguments.length?(s="function"==typeof t?t:Array.isArray(t)?bo(yo.call(t)):bo(t),l):s},l.bandwidth=function(t){if(!arguments.length)return Math.sqrt(o*(o+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return o=Math.round((Math.sqrt(4*t*t+1)-1)/2),y()},l},t.contours=To,t.create=function(t){return Rt(Z(t).call(document.documentElement))},t.creator=Z,t.cross=function(t,n,e){var r,i,o,u,c=t.length,f=n.length,s=new Array(c*f);for(null==e&&(e=a),r=o=0;rt?1:n>=t?0:NaN},t.deviation=f,t.dispatch=I,t.drag=function(){var n,e,r,i,o=Gt,a=$t,u=Wt,c=Zt,f={},s=I("start","drag","end"),l=0,h=0;function d(t){t.on("mousedown.drag",p).filter(c).on("touchstart.drag",y).on("touchmove.drag",_).on("touchend.drag touchcancel.drag",b).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function p(){if(!i&&o.apply(this,arguments)){var u=m("mouse",a.apply(this,arguments),Bt,this,arguments);u&&(Rt(t.event.view).on("mousemove.drag",v,!0).on("mouseup.drag",g,!0),Ht(t.event.view),Yt(),r=!1,n=t.event.clientX,e=t.event.clientY,u("start"))}}function v(){if(It(),!r){var 
i=t.event.clientX-n,o=t.event.clientY-e;r=i*i+o*o>h}f.mouse("drag")}function g(){Rt(t.event.view).on("mousemove.drag mouseup.drag",null),jt(t.event.view,r),It(),f.mouse("end")}function y(){if(o.apply(this,arguments)){var n,e,r=t.event.changedTouches,i=a.apply(this,arguments),u=r.length;for(n=0;nc+d||if+d||ou.index){var p=c-a.x-a.vx,v=f-a.y-a.vy,g=p*p+v*v;gt.r&&(t.r=t[n].r)}function u(){if(n){var r,i,o=n.length;for(e=new Array(o),r=0;r=a)){(t.data!==n||t.next)&&(0===s&&(d+=(s=ya())*s),0===l&&(d+=(l=ya())*l),d1?(null==e?u.remove(t):u.set(t,d(e)),n):u.get(t)},find:function(n,e,r){var i,o,a,u,c,f=0,s=t.length;for(null==r?r=1/0:r*=r,f=0;f1?(f.on(t,e),n):f.on(t)}}},t.forceX=function(t){var n,e,r,i=ga(.1);function o(t){for(var i,o=0,a=n.length;o=.12&&i<.234&&r>=-.425&&r<-.214?u:i>=.166&&i<.234&&r>=-.214&&r<-.115?c:a).invert(t)},s.stream=function(e){return t&&n===e?t:(r=[a.stream(n=e),u.stream(e),c.stream(e)],i=r.length,t={point:function(t,n){for(var e=-1;++ePc(r[0],r[1])&&(r[1]=i[1]),Pc(i[0],r[1])>Pc(r[0],r[1])&&(r[0]=i[0])):o.push(r=i);for(a=-1/0,n=0,r=o[e=o.length-1];n<=e;r=i,++n)i=o[n],(u=Pc(r[1],i[0]))>a&&(a=u,Zu=i[0],Ku=r[1])}return ic=oc=null,Zu===1/0||Qu===1/0?[[NaN,NaN],[NaN,NaN]]:[[Zu,Qu],[Ku,Ju]]},t.geoCentroid=function(t){ac=uc=cc=fc=sc=lc=hc=dc=pc=vc=gc=0,Cu(t,Dc);var n=pc,e=vc,r=gc,i=n*n+e*e+r*r;return i2?t[2]+90:90]):[(t=e())[0],t[1],t[2]-90]},e([0,0,90]).scale(159.155)},t.geoTransverseMercatorRaw=Ml,t.gray=function(t,n){return new Bn(t,0,0,null==n?1:n)},t.hcl=Xn,t.hierarchy=kl,t.histogram=function(){var t=v,n=s,e=M;function r(r){var o,a,u=r.length,c=new Array(u);for(o=0;ol;)h.pop(),--d;var p,v=new Array(d+1);for(o=0;o<=d;++o)(p=v[o]=[]).x0=o>0?h[o-1]:s,p.x1=o1)&&(t-=Math.floor(t));var n=Math.abs(t-.5);return 
ly.h=360*t-100,ly.s=1.5-1.5*n,ly.l=.8-.9*n,ly+""},t.interpolateRdBu=yg,t.interpolateRdGy=bg,t.interpolateRdPu=Yg,t.interpolateRdYlBu=xg,t.interpolateRdYlGn=Mg,t.interpolateReds=oy,t.interpolateRgb=he,t.interpolateRgbBasis=pe,t.interpolateRgbBasisClosed=ve,t.interpolateRound=Ae,t.interpolateSinebow=function(t){var n;return t=(.5-t)*Math.PI,hy.r=255*(n=Math.sin(t))*n,hy.g=255*(n=Math.sin(t+dy))*n,hy.b=255*(n=Math.sin(t+py))*n,hy+""},t.interpolateSpectral=Tg,t.interpolateString=Ne,t.interpolateTransformCss=qe,t.interpolateTransformSvg=Le,t.interpolateTurbo=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+t*(1172.33-t*(10793.56-t*(33300.12-t*(38394.49-14825.05*t)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+t*(557.33+t*(1225.33-t*(3574.96-t*(1073.77+707.56*t)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+t*(3211.1-t*(15327.97-t*(27814-t*(22569.18-6838.66*t)))))))+")"},t.interpolateViridis=gy,t.interpolateWarm=fy,t.interpolateYlGn=Xg,t.interpolateYlGnBu=Hg,t.interpolateYlOrBr=Gg,t.interpolateYlOrRd=Wg,t.interpolateZoom=Ie,t.interrupt=Pr,t.interval=function(t,n,e){var r=new lr,i=n;return null==n?(r.restart(t,n,e),r):(n=+n,e=null==e?fr():+e,r.restart(function o(a){a+=i,r.restart(o,i+=n,e),t(a)},n,e),r)},t.isoFormat=Rv,t.isoParse=Dv,t.json=function(t,n){return fetch(t,n).then(la)},t.keys=function(t){var n=[];for(var e in t)n.push(e);return n},t.lab=On,t.lch=function(t,n,e,r){return 1===arguments.length?jn(t):new Vn(e,n,t,null==r?1:r)},t.line=Hy,t.lineRadial=Qy,t.linkHorizontal=function(){return r_(i_)},t.linkRadial=function(){var t=r_(a_);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t},t.linkVertical=function(){return r_(o_)},t.local=qt,t.map=co,t.matcher=nt,t.max=T,t.mean=function(t,n){var e,r=t.length,i=r,o=-1,a=0;if(null==n)for(;++o=r.length)return null!=t&&e.sort(t),null!=n?n(e):e;for(var c,f,s,l=-1,h=e.length,d=r[i++],p=co(),v=a();++lr.length)return e;var a,u=i[o-1];return 
null!=n&&o>=r.length?a=e.entries():(a=[],e.each(function(n,e){a.push({key:e,values:t(n,o)})})),null!=u?a.sort(function(t,n){return u(t.key,n.key)}):a}(o(t,0,lo,ho),0)},key:function(t){return r.push(t),e},sortKeys:function(t){return i[r.length-1]=t,e},sortValues:function(n){return t=n,e},rollup:function(t){return n=t,e}}},t.now=fr,t.pack=function(){var t=null,n=1,e=1,r=Wl;function i(i){return i.x=n/2,i.y=e/2,t?i.eachBefore(Kl(t)).eachAfter(Jl(r,.5)).eachBefore(th(1)):i.eachBefore(Kl(Ql)).eachAfter(Jl(Wl,1)).eachAfter(Jl(r,i.r/Math.min(n,e))).eachBefore(th(Math.min(n,e)/(2*i.r))),i}return i.radius=function(n){return arguments.length?(t=Gl(n),i):t},i.size=function(t){return arguments.length?(n=+t[0],e=+t[1],i):[n,e]},i.padding=function(t){return arguments.length?(r="function"==typeof t?t:Zl(+t),i):r},i},t.packEnclose=Dl,t.packSiblings=function(t){return Vl(t),t},t.pairs=function(t,n){null==n&&(n=a);for(var e=0,r=t.length-1,i=t[0],o=new Array(r<0?0:r);e0&&(d+=l);for(null!=n?p.sort(function(t,e){return n(v[t],v[e])}):null!=e&&p.sort(function(t,n){return e(a[t],a[n])}),u=0,f=d?(y-h*b)/d:0;u0?l*f:0)+b,v[c]={data:a[c],index:u,value:l,startAngle:g,endAngle:s,padAngle:_};return v}return a.value=function(n){return arguments.length?(t="function"==typeof n?n:my(+n),a):t},a.sortValues=function(t){return arguments.length?(n=t,e=null,a):n},a.sort=function(t){return arguments.length?(e=t,n=null,a):e},a.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:my(+t),a):r},a.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:my(+t),a):i},a.padAngle=function(t){return arguments.length?(o="function"==typeof t?t:my(+t),a):o},a},t.piecewise=function(t,n){for(var e=0,r=n.length-1,i=n[0],o=new Array(r<0?0:r);eu!=f>u&&a<(c-e)*(u-r)/(f-r)+e&&(s=!s),c=e,f=r;return s},t.polygonHull=function(t){if((e=t.length)<3)return null;var n,e,r=new Array(e),i=new 
Array(e);for(n=0;n=0;--n)f.push(t[r[o[n]][2]]);for(n=+u;n0?a[n-1]:r[0],n=o?[a[o-1],r]:[a[n-1],a[n]]},c.unknown=function(t){return arguments.length?(n=t,c):c},c.thresholds=function(){return a.slice()},c.copy=function(){return t().domain([e,r]).range(u).unknown(n)},Eh.apply($h(c),arguments)},t.scaleSequential=function t(){var n=$h(Xv()(Bh));return n.copy=function(){return Vv(n,t())},Ch.apply(n,arguments)},t.scaleSequentialLog=function t(){var n=ed(Xv()).domain([1,10]);return n.copy=function(){return Vv(n,t()).base(n.base())},Ch.apply(n,arguments)},t.scaleSequentialPow=Gv,t.scaleSequentialQuantile=function t(){var e=[],r=Bh;function o(t){if(!isNaN(t=+t))return r((i(e,t)-1)/(e.length-1))}return o.domain=function(t){if(!arguments.length)return e.slice();e=[];for(var r,i=0,a=t.length;i0)for(var e,r,i,o,a,u,c=0,f=t[n[0]].length;c0?(r[0]=o,r[1]=o+=i):i<0?(r[1]=a,r[0]=a+=i):(r[0]=0,r[1]=i)},t.stackOffsetExpand=function(t,n){if((r=t.length)>0){for(var e,r,i,o=0,a=t[0].length;o0){for(var e,r=0,i=t[n[0]],o=i.length;r0&&(r=(e=t[n[0]]).length)>0){for(var e,r,i,o=0,a=1;a0)throw new Error("cycle");return o}return e.id=function(n){return arguments.length?(t=$l(n),e):t},e.parentId=function(t){return arguments.length?(n=$l(t),e):n},e},t.style=ft,t.sum=function(t,n){var e,r=t.length,i=-1,o=0;if(null==n)for(;++i=0;--i)u.push(e=n.children[i]=new dh(r[i],i)),e.parent=n;return(a.parent=new dh(null,0)).children=[a],a}(i);if(c.eachAfter(o),c.parent.m=-c.z,c.eachBefore(a),r)i.eachBefore(u);else{var f=i,s=i,l=i;i.eachBefore(function(t){t.xs.x&&(s=t),t.depth>l.depth&&(l=t)});var h=f===s?1:t(f,s)/2,d=h-f.x,p=n/(s.x+h+d),v=e/(l.depth||1);i.eachBefore(function(t){t.x=(t.x+d)*p,t.y=t.depth*v})}return i}function o(n){var e=n.children,r=n.parent.children,i=n.i?r[n.i-1]:null;if(e){!function(t){for(var n,e=0,r=0,i=t.children,o=i.length;--o>=0;)(n=i[o]).z+=e,n.m+=e,e+=n.s+(r+=n.c)}(n);var o=(e[0].z+e[e.length-1].z)/2;i?(n.z=i.z+t(n._,i._),n.m=n.z-o):n.z=o}else 
i&&(n.z=i.z+t(n._,i._));n.parent.A=function(n,e,r){if(e){for(var i,o=n,a=n,u=e,c=o.parent.children[0],f=o.m,s=a.m,l=u.m,h=c.m;u=sh(u),o=fh(o),u&&o;)c=fh(c),(a=sh(a)).a=n,(i=u.z+l-o.z-f+t(u._,o._))>0&&(lh(hh(u,n,r),n,i),f+=i,s+=i),l+=u.m,f+=o.m,h+=c.m,s+=a.m;u&&!sh(a)&&(a.t=u,a.m+=l-s),o&&!fh(c)&&(c.t=o,c.m+=f-h,r=n)}return r}(n,i,n.parent.A||r[0])}function a(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function u(t){t.x*=n,t.y=t.depth*e}return i.separation=function(n){return arguments.length?(t=n,i):t},i.size=function(t){return arguments.length?(r=!1,n=+t[0],e=+t[1],i):r?null:[n,e]},i.nodeSize=function(t){return arguments.length?(r=!0,n=+t[0],e=+t[1],i):r?[n,e]:null},i},t.treemap=function(){var t=yh,n=!1,e=1,r=1,i=[0],o=Wl,a=Wl,u=Wl,c=Wl,f=Wl;function s(t){return t.x0=t.y0=0,t.x1=e,t.y1=r,t.eachBefore(l),i=[0],n&&t.eachBefore(nh),t}function l(n){var e=i[n.depth],r=n.x0+e,s=n.y0+e,l=n.x1-e,h=n.y1-e;l=e-1){var s=u[n];return s.x0=i,s.y0=o,s.x1=a,void(s.y1=c)}for(var l=f[n],h=r/2+l,d=n+1,p=e-1;d>>1;f[v]c-o){var _=(i*y+a*g)/r;t(n,d,g,i,o,_,c),t(d,e,y,_,o,a,c)}else{var b=(o*y+c*g)/r;t(n,d,g,i,o,a,b),t(d,e,y,i,b,a,c)}}(0,c,t.value,n,e,r,i)},t.treemapDice=eh,t.treemapResquarify=_h,t.treemapSlice=ph,t.treemapSliceDice=function(t,n,e,r,i){(1&t.depth?ph:eh)(t,n,e,r,i)},t.treemapSquarify=yh,t.tsv=sa,t.tsvFormat=Ko,t.tsvFormatBody=Jo,t.tsvFormatRow=na,t.tsvFormatRows=ta,t.tsvFormatValue=ea,t.tsvParse=Zo,t.tsvParseRows=Qo,t.utcDay=Wd,t.utcDays=Zd,t.utcFriday=rp,t.utcFridays=sp,t.utcHour=Gd,t.utcHours=$d,t.utcMillisecond=pd,t.utcMilliseconds=vd,t.utcMinute=Xd,t.utcMinutes=Vd,t.utcMonday=Jd,t.utcMondays=ap,t.utcMonth=hp,t.utcMonths=dp,t.utcSaturday=ip,t.utcSaturdays=lp,t.utcSecond=_d,t.utcSeconds=bd,t.utcSunday=Kd,t.utcSundays=op,t.utcThursday=ep,t.utcThursdays=fp,t.utcTuesday=tp,t.utcTuesdays=up,t.utcWednesday=np,t.utcWednesdays=cp,t.utcWeek=Kd,t.utcWeeks=op,t.utcYear=pp,t.utcYears=vp,t.values=function(t){var n=[];for(var e in t)n.push(t[e]);return 
n},t.variance=c,t.version="5.16.0",t.voronoi=function(){var t=sb,n=lb,e=null;function r(r){return new Vb(r.map(function(e,i){var o=[Math.round(t(e,i,r)/Ib)*Ib,Math.round(n(e,i,r)/Ib)*Ib];return o.index=i,o.data=e,o}),e)}return r.polygons=function(t){return r(t).polygons()},r.links=function(t){return r(t).links()},r.triangles=function(t){return r(t).triangles()},r.x=function(n){return arguments.length?(t="function"==typeof n?n:fb(+n),r):t},r.y=function(t){return arguments.length?(n="function"==typeof t?t:fb(+t),r):n},r.extent=function(t){return arguments.length?(e=null==t?null:[[+t[0][0],+t[0][1]],[+t[1][0],+t[1][1]]],r):e&&[[e[0][0],e[0][1]],[e[1][0],e[1][1]]]},r.size=function(t){return arguments.length?(e=null==t?null:[[0,0],[+t[0],+t[1]]],r):e&&[e[1][0]-e[0][0],e[1][1]-e[0][1]]},r},t.window=ct,t.xml=da,t.zip=function(){return k(arguments)},t.zoom=function(){var n,e,r=tm,i=nm,o=om,a=rm,u=im,c=[0,1/0],f=[[-1/0,-1/0],[1/0,1/0]],s=250,l=Ie,h=I("start","zoom","end"),d=500,p=150,v=0;function g(t){t.property("__zoom",em).on("wheel.zoom",M).on("mousedown.zoom",N).on("dblclick.zoom",T).filter(u).on("touchstart.zoom",A).on("touchmove.zoom",S).on("touchend.zoom touchcancel.zoom",k).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function y(t,n){return(n=Math.max(c[0],Math.min(c[1],n)))===t.k?t:new Wb(n,t.x,t.y)}function _(t,n,e){var r=n[0]-e[0]*t.k,i=n[1]-e[1]*t.k;return r===t.x&&i===t.y?t:new Wb(t.k,r,i)}function b(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function m(t,n,e){t.on("start.zoom",function(){x(this,arguments).start()}).on("interrupt.zoom end.zoom",function(){x(this,arguments).end()}).tween("zoom",function(){var t=this,r=arguments,o=x(t,r),a=i.apply(t,r),u=null==e?b(a):"function"==typeof e?e.apply(t,r):e,c=Math.max(a[1][0]-a[0][0],a[1][1]-a[0][1]),f=t.__zoom,s="function"==typeof n?n.apply(t,r):n,h=l(f.invert(u).concat(c/f.k),s.invert(u).concat(c/s.k));return function(t){if(1===t)t=s;else{var n=h(t),e=c/n[2];t=new 
Wb(e,u[0]-n[0]*e,u[1]-n[1]*e)}o.zoom(null,t)}})}function x(t,n,e){return!e&&t.__zooming||new w(t,n)}function w(t,n){this.that=t,this.args=n,this.active=0,this.extent=i.apply(t,n),this.taps=0}function M(){if(r.apply(this,arguments)){var t=x(this,arguments),n=this.__zoom,e=Math.max(c[0],Math.min(c[1],n.k*Math.pow(2,a.apply(this,arguments)))),i=Bt(this);if(t.wheel)t.mouse[0][0]===i[0]&&t.mouse[0][1]===i[1]||(t.mouse[1]=n.invert(t.mouse[0]=i)),clearTimeout(t.wheel);else{if(n.k===e)return;t.mouse=[i,n.invert(i)],Pr(this),t.start()}Jb(),t.wheel=setTimeout(function(){t.wheel=null,t.end()},p),t.zoom("mouse",o(_(y(n,e),t.mouse[0],t.mouse[1]),t.extent,f))}}function N(){if(!e&&r.apply(this,arguments)){var n=x(this,arguments,!0),i=Rt(t.event.view).on("mousemove.zoom",function(){if(Jb(),!n.moved){var e=t.event.clientX-u,r=t.event.clientY-c;n.moved=e*e+r*r>v}n.zoom("mouse",o(_(n.that.__zoom,n.mouse[0]=Bt(n.that),n.mouse[1]),n.extent,f))},!0).on("mouseup.zoom",function(){i.on("mousemove.zoom mouseup.zoom",null),jt(t.event.view,n.moved),Jb(),n.end()},!0),a=Bt(this),u=t.event.clientX,c=t.event.clientY;Ht(t.event.view),Kb(),n.mouse=[a,this.__zoom.invert(a)],Pr(this),n.start()}}function T(){if(r.apply(this,arguments)){var n=this.__zoom,e=Bt(this),a=n.invert(e),u=n.k*(t.event.shiftKey?.5:2),c=o(_(y(n,u),e,a),i.apply(this,arguments),f);Jb(),s>0?Rt(this).transition().duration(s).call(m,c,e):Rt(this).call(g.transform,c)}}function A(){if(r.apply(this,arguments)){var e,i,o,a,u=t.event.touches,c=u.length,f=x(this,arguments,t.event.changedTouches.length===c);for(Kb(),i=0;i0;--i){entry=buckets[i].dequeue();if(entry){results=results.concat(removeNode(g,buckets,zeroIdx,entry,true));break}}}}return results}function removeNode(g,buckets,zeroIdx,entry,collectPredecessors){var results=collectPredecessors?[]:undefined;_.forEach(g.inEdges(entry.v),function(edge){var weight=g.edge(edge);var 
uEntry=g.node(edge.v);if(collectPredecessors){results.push({v:edge.v,w:edge.w})}uEntry.out-=weight;assignBucket(buckets,zeroIdx,uEntry)});_.forEach(g.outEdges(entry.v),function(edge){var weight=g.edge(edge);var w=edge.w;var wEntry=g.node(w);wEntry["in"]-=weight;assignBucket(buckets,zeroIdx,wEntry)});g.removeNode(entry.v);return results}function buildState(g,weightFn){var fasGraph=new Graph;var maxIn=0;var maxOut=0;_.forEach(g.nodes(),function(v){fasGraph.setNode(v,{v:v,in:0,out:0})}); +// Aggregate weights on nodes, but also sum the weights across multi-edges +// into a single edge for the fasGraph. +_.forEach(g.edges(),function(e){var prevWeight=fasGraph.edge(e.v,e.w)||0;var weight=weightFn(e);var edgeWeight=prevWeight+weight;fasGraph.setEdge(e.v,e.w,edgeWeight);maxOut=Math.max(maxOut,fasGraph.node(e.v).out+=weight);maxIn=Math.max(maxIn,fasGraph.node(e.w)["in"]+=weight)});var buckets=_.range(maxOut+maxIn+3).map(function(){return new List});var zeroIdx=maxIn+1;_.forEach(fasGraph.nodes(),function(v){assignBucket(buckets,zeroIdx,fasGraph.node(v))});return{graph:fasGraph,buckets:buckets,zeroIdx:zeroIdx}}function assignBucket(buckets,zeroIdx,entry){if(!entry.out){buckets[0].enqueue(entry)}else if(!entry["in"]){buckets[buckets.length-1].enqueue(entry)}else{buckets[entry.out-entry["in"]+zeroIdx].enqueue(entry)}}},{"./data/list":5,"./graphlib":7,"./lodash":10}],9:[function(require,module,exports){"use strict";var _=require("./lodash");var acyclic=require("./acyclic");var normalize=require("./normalize");var rank=require("./rank");var normalizeRanks=require("./util").normalizeRanks;var parentDummyChains=require("./parent-dummy-chains");var removeEmptyRanks=require("./util").removeEmptyRanks;var nestingGraph=require("./nesting-graph");var addBorderSegments=require("./add-border-segments");var coordinateSystem=require("./coordinate-system");var order=require("./order");var position=require("./position");var util=require("./util");var 
Graph=require("./graphlib").Graph;module.exports=layout;function layout(g,opts){var time=opts&&opts.debugTiming?util.time:util.notime;time("layout",function(){var layoutGraph=time(" buildLayoutGraph",function(){return buildLayoutGraph(g)});time(" runLayout",function(){runLayout(layoutGraph,time)});time(" updateInputGraph",function(){updateInputGraph(g,layoutGraph)})})}function runLayout(g,time){time(" makeSpaceForEdgeLabels",function(){makeSpaceForEdgeLabels(g)});time(" removeSelfEdges",function(){removeSelfEdges(g)});time(" acyclic",function(){acyclic.run(g)});time(" nestingGraph.run",function(){nestingGraph.run(g)});time(" rank",function(){rank(util.asNonCompoundGraph(g))});time(" injectEdgeLabelProxies",function(){injectEdgeLabelProxies(g)});time(" removeEmptyRanks",function(){removeEmptyRanks(g)});time(" nestingGraph.cleanup",function(){nestingGraph.cleanup(g)});time(" normalizeRanks",function(){normalizeRanks(g)});time(" assignRankMinMax",function(){assignRankMinMax(g)});time(" removeEdgeLabelProxies",function(){removeEdgeLabelProxies(g)});time(" normalize.run",function(){normalize.run(g)});time(" parentDummyChains",function(){parentDummyChains(g)});time(" addBorderSegments",function(){addBorderSegments(g)});time(" order",function(){order(g)});time(" insertSelfEdges",function(){insertSelfEdges(g)});time(" adjustCoordinateSystem",function(){coordinateSystem.adjust(g)});time(" position",function(){position(g)});time(" positionSelfEdges",function(){positionSelfEdges(g)});time(" removeBorderNodes",function(){removeBorderNodes(g)});time(" normalize.undo",function(){normalize.undo(g)});time(" fixupEdgeLabelCoords",function(){fixupEdgeLabelCoords(g)});time(" undoCoordinateSystem",function(){coordinateSystem.undo(g)});time(" translateGraph",function(){translateGraph(g)});time(" assignNodeIntersects",function(){assignNodeIntersects(g)});time(" reversePoints",function(){reversePointsForReversedEdges(g)});time(" acyclic.undo",function(){acyclic.undo(g)})} +/* + * Copies 
final layout information from the layout graph back to the input + * graph. This process only copies whitelisted attributes from the layout graph + * to the input graph, so it serves as a good place to determine what + * attributes can influence layout. + */function updateInputGraph(inputGraph,layoutGraph){_.forEach(inputGraph.nodes(),function(v){var inputLabel=inputGraph.node(v);var layoutLabel=layoutGraph.node(v);if(inputLabel){inputLabel.x=layoutLabel.x;inputLabel.y=layoutLabel.y;if(layoutGraph.children(v).length){inputLabel.width=layoutLabel.width;inputLabel.height=layoutLabel.height}}});_.forEach(inputGraph.edges(),function(e){var inputLabel=inputGraph.edge(e);var layoutLabel=layoutGraph.edge(e);inputLabel.points=layoutLabel.points;if(_.has(layoutLabel,"x")){inputLabel.x=layoutLabel.x;inputLabel.y=layoutLabel.y}});inputGraph.graph().width=layoutGraph.graph().width;inputGraph.graph().height=layoutGraph.graph().height}var graphNumAttrs=["nodesep","edgesep","ranksep","marginx","marginy"];var graphDefaults={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"};var graphAttrs=["acyclicer","ranker","rankdir","align"];var nodeNumAttrs=["width","height"];var nodeDefaults={width:0,height:0};var edgeNumAttrs=["minlen","weight","width","height","labeloffset"];var edgeDefaults={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"};var edgeAttrs=["labelpos"]; +/* + * Constructs a new graph from the input graph, which can be used for layout. + * This process copies only whitelisted attributes from the input graph to the + * layout graph. Thus this function serves as a good place to determine what + * attributes can influence layout. 
+ */function buildLayoutGraph(inputGraph){var g=new Graph({multigraph:true,compound:true});var graph=canonicalize(inputGraph.graph());g.setGraph(_.merge({},graphDefaults,selectNumberAttrs(graph,graphNumAttrs),_.pick(graph,graphAttrs)));_.forEach(inputGraph.nodes(),function(v){var node=canonicalize(inputGraph.node(v));g.setNode(v,_.defaults(selectNumberAttrs(node,nodeNumAttrs),nodeDefaults));g.setParent(v,inputGraph.parent(v))});_.forEach(inputGraph.edges(),function(e){var edge=canonicalize(inputGraph.edge(e));g.setEdge(e,_.merge({},edgeDefaults,selectNumberAttrs(edge,edgeNumAttrs),_.pick(edge,edgeAttrs)))});return g} +/* + * This idea comes from the Gansner paper: to account for edge labels in our + * layout we split each rank in half by doubling minlen and halving ranksep. + * Then we can place labels at these mid-points between nodes. + * + * We also add some minimal padding to the width to push the label for the edge + * away from the edge itself a bit. + */function makeSpaceForEdgeLabels(g){var graph=g.graph();graph.ranksep/=2;_.forEach(g.edges(),function(e){var edge=g.edge(e);edge.minlen*=2;if(edge.labelpos.toLowerCase()!=="c"){if(graph.rankdir==="TB"||graph.rankdir==="BT"){edge.width+=edge.labeloffset}else{edge.height+=edge.labeloffset}}})} +/* + * Creates temporary dummy nodes that capture the rank in which each edge's + * label is going to, if it has one of non-zero width and height. We do this + * so that we can safely remove empty ranks while preserving balance for the + * label's position. 
+ */function injectEdgeLabelProxies(g){_.forEach(g.edges(),function(e){var edge=g.edge(e);if(edge.width&&edge.height){var v=g.node(e.v);var w=g.node(e.w);var label={rank:(w.rank-v.rank)/2+v.rank,e:e};util.addDummyNode(g,"edge-proxy",label,"_ep")}})}function assignRankMinMax(g){var maxRank=0;_.forEach(g.nodes(),function(v){var node=g.node(v);if(node.borderTop){node.minRank=g.node(node.borderTop).rank;node.maxRank=g.node(node.borderBottom).rank;maxRank=_.max(maxRank,node.maxRank)}});g.graph().maxRank=maxRank}function removeEdgeLabelProxies(g){_.forEach(g.nodes(),function(v){var node=g.node(v);if(node.dummy==="edge-proxy"){g.edge(node.e).labelRank=node.rank;g.removeNode(v)}})}function translateGraph(g){var minX=Number.POSITIVE_INFINITY;var maxX=0;var minY=Number.POSITIVE_INFINITY;var maxY=0;var graphLabel=g.graph();var marginX=graphLabel.marginx||0;var marginY=graphLabel.marginy||0;function getExtremes(attrs){var x=attrs.x;var y=attrs.y;var w=attrs.width;var h=attrs.height;minX=Math.min(minX,x-w/2);maxX=Math.max(maxX,x+w/2);minY=Math.min(minY,y-h/2);maxY=Math.max(maxY,y+h/2)}_.forEach(g.nodes(),function(v){getExtremes(g.node(v))});_.forEach(g.edges(),function(e){var edge=g.edge(e);if(_.has(edge,"x")){getExtremes(edge)}});minX-=marginX;minY-=marginY;_.forEach(g.nodes(),function(v){var node=g.node(v);node.x-=minX;node.y-=minY});_.forEach(g.edges(),function(e){var edge=g.edge(e);_.forEach(edge.points,function(p){p.x-=minX;p.y-=minY});if(_.has(edge,"x")){edge.x-=minX}if(_.has(edge,"y")){edge.y-=minY}});graphLabel.width=maxX-minX+marginX;graphLabel.height=maxY-minY+marginY}function assignNodeIntersects(g){_.forEach(g.edges(),function(e){var edge=g.edge(e);var nodeV=g.node(e.v);var nodeW=g.node(e.w);var p1,p2;if(!edge.points){edge.points=[];p1=nodeW;p2=nodeV}else{p1=edge.points[0];p2=edge.points[edge.points.length-1]}edge.points.unshift(util.intersectRect(nodeV,p1));edge.points.push(util.intersectRect(nodeW,p2))})}function 
fixupEdgeLabelCoords(g){_.forEach(g.edges(),function(e){var edge=g.edge(e);if(_.has(edge,"x")){if(edge.labelpos==="l"||edge.labelpos==="r"){edge.width-=edge.labeloffset}switch(edge.labelpos){case"l":edge.x-=edge.width/2+edge.labeloffset;break;case"r":edge.x+=edge.width/2+edge.labeloffset;break}}})}function reversePointsForReversedEdges(g){_.forEach(g.edges(),function(e){var edge=g.edge(e);if(edge.reversed){edge.points.reverse()}})}function removeBorderNodes(g){_.forEach(g.nodes(),function(v){if(g.children(v).length){var node=g.node(v);var t=g.node(node.borderTop);var b=g.node(node.borderBottom);var l=g.node(_.last(node.borderLeft));var r=g.node(_.last(node.borderRight));node.width=Math.abs(r.x-l.x);node.height=Math.abs(b.y-t.y);node.x=l.x+node.width/2;node.y=t.y+node.height/2}});_.forEach(g.nodes(),function(v){if(g.node(v).dummy==="border"){g.removeNode(v)}})}function removeSelfEdges(g){_.forEach(g.edges(),function(e){if(e.v===e.w){var node=g.node(e.v);if(!node.selfEdges){node.selfEdges=[]}node.selfEdges.push({e:e,label:g.edge(e)});g.removeEdge(e)}})}function insertSelfEdges(g){var layers=util.buildLayerMatrix(g);_.forEach(layers,function(layer){var orderShift=0;_.forEach(layer,function(v,i){var node=g.node(v);node.order=i+orderShift;_.forEach(node.selfEdges,function(selfEdge){util.addDummyNode(g,"selfedge",{width:selfEdge.label.width,height:selfEdge.label.height,rank:node.rank,order:i+ ++orderShift,e:selfEdge.e,label:selfEdge.label},"_se")});delete node.selfEdges})})}function positionSelfEdges(g){_.forEach(g.nodes(),function(v){var node=g.node(v);if(node.dummy==="selfedge"){var selfNode=g.node(node.e.v);var x=selfNode.x+selfNode.width/2;var y=selfNode.y;var dx=node.x-x;var dy=selfNode.height/2;g.setEdge(node.e,node.label);g.removeNode(v);node.label.points=[{x:x+2*dx/3,y:y-dy},{x:x+5*dx/6,y:y-dy},{x:x+dx,y:y},{x:x+5*dx/6,y:y+dy},{x:x+2*dx/3,y:y+dy}];node.label.x=node.x;node.label.y=node.y}})}function selectNumberAttrs(obj,attrs){return 
_.mapValues(_.pick(obj,attrs),Number)}function canonicalize(attrs){var newAttrs={};_.forEach(attrs,function(v,k){newAttrs[k.toLowerCase()]=v});return newAttrs}},{"./acyclic":2,"./add-border-segments":3,"./coordinate-system":4,"./graphlib":7,"./lodash":10,"./nesting-graph":11,"./normalize":12,"./order":17,"./parent-dummy-chains":22,"./position":24,"./rank":26,"./util":29}],10:[function(require,module,exports){ +/* global window */ +var lodash;if(typeof require==="function"){try{lodash={cloneDeep:require("lodash/cloneDeep"),constant:require("lodash/constant"),defaults:require("lodash/defaults"),each:require("lodash/each"),filter:require("lodash/filter"),find:require("lodash/find"),flatten:require("lodash/flatten"),forEach:require("lodash/forEach"),forIn:require("lodash/forIn"),has:require("lodash/has"),isUndefined:require("lodash/isUndefined"),last:require("lodash/last"),map:require("lodash/map"),mapValues:require("lodash/mapValues"),max:require("lodash/max"),merge:require("lodash/merge"),min:require("lodash/min"),minBy:require("lodash/minBy"),now:require("lodash/now"),pick:require("lodash/pick"),range:require("lodash/range"),reduce:require("lodash/reduce"),sortBy:require("lodash/sortBy"),uniqueId:require("lodash/uniqueId"),values:require("lodash/values"),zipObject:require("lodash/zipObject")}}catch(e){ +// continue regardless of error +}}if(!lodash){lodash=window._}module.exports=lodash},{"lodash/cloneDeep":227,"lodash/constant":228,"lodash/defaults":229,"lodash/each":230,"lodash/filter":232,"lodash/find":233,"lodash/flatten":235,"lodash/forEach":236,"lodash/forIn":237,"lodash/has":239,"lodash/isUndefined":258,"lodash/last":261,"lodash/map":262,"lodash/mapValues":263,"lodash/max":264,"lodash/merge":266,"lodash/min":267,"lodash/minBy":268,"lodash/now":270,"lodash/pick":271,"lodash/range":273,"lodash/reduce":274,"lodash/sortBy":276,"lodash/uniqueId":286,"lodash/values":287,"lodash/zipObject":288}],11:[function(require,module,exports){var _=require("./lodash");var 
util=require("./util");module.exports={run:run,cleanup:cleanup}; +/* + * A nesting graph creates dummy nodes for the tops and bottoms of subgraphs, + * adds appropriate edges to ensure that all cluster nodes are placed between + * these boundries, and ensures that the graph is connected. + * + * In addition we ensure, through the use of the minlen property, that nodes + * and subgraph border nodes to not end up on the same rank. + * + * Preconditions: + * + * 1. Input graph is a DAG + * 2. Nodes in the input graph has a minlen attribute + * + * Postconditions: + * + * 1. Input graph is connected. + * 2. Dummy nodes are added for the tops and bottoms of subgraphs. + * 3. The minlen attribute for nodes is adjusted to ensure nodes do not + * get placed on the same rank as subgraph border nodes. + * + * The nesting graph idea comes from Sander, "Layout of Compound Directed + * Graphs." + */function run(g){var root=util.addDummyNode(g,"root",{},"_root");var depths=treeDepths(g);var height=_.max(_.values(depths))-1;// Note: depths is an Object not an array +var nodeSep=2*height+1;g.graph().nestingRoot=root; +// Multiply minlen by nodeSep to align nodes on non-border ranks. +_.forEach(g.edges(),function(e){g.edge(e).minlen*=nodeSep}); +// Calculate a weight that is sufficient to keep subgraphs vertically compact +var weight=sumWeights(g)+1; +// Create border nodes and link them up +_.forEach(g.children(),function(child){dfs(g,root,nodeSep,weight,height,depths,child)}); +// Save the multiplier for node layers for later removal of empty border +// layers. 
+g.graph().nodeRankFactor=nodeSep}function dfs(g,root,nodeSep,weight,height,depths,v){var children=g.children(v);if(!children.length){if(v!==root){g.setEdge(root,v,{weight:0,minlen:nodeSep})}return}var top=util.addBorderNode(g,"_bt");var bottom=util.addBorderNode(g,"_bb");var label=g.node(v);g.setParent(top,v);label.borderTop=top;g.setParent(bottom,v);label.borderBottom=bottom;_.forEach(children,function(child){dfs(g,root,nodeSep,weight,height,depths,child);var childNode=g.node(child);var childTop=childNode.borderTop?childNode.borderTop:child;var childBottom=childNode.borderBottom?childNode.borderBottom:child;var thisWeight=childNode.borderTop?weight:2*weight;var minlen=childTop!==childBottom?1:height-depths[v]+1;g.setEdge(top,childTop,{weight:thisWeight,minlen:minlen,nestingEdge:true});g.setEdge(childBottom,bottom,{weight:thisWeight,minlen:minlen,nestingEdge:true})});if(!g.parent(v)){g.setEdge(root,top,{weight:0,minlen:height+depths[v]})}}function treeDepths(g){var depths={};function dfs(v,depth){var children=g.children(v);if(children&&children.length){_.forEach(children,function(child){dfs(child,depth+1)})}depths[v]=depth}_.forEach(g.children(),function(v){dfs(v,1)});return depths}function sumWeights(g){return _.reduce(g.edges(),function(acc,e){return acc+g.edge(e).weight},0)}function cleanup(g){var graphLabel=g.graph();g.removeNode(graphLabel.nestingRoot);delete graphLabel.nestingRoot;_.forEach(g.edges(),function(e){var edge=g.edge(e);if(edge.nestingEdge){g.removeEdge(e)}})}},{"./lodash":10,"./util":29}],12:[function(require,module,exports){"use strict";var _=require("./lodash");var util=require("./util");module.exports={run:run,undo:undo}; +/* + * Breaks any long edges in the graph into short segments that span 1 layer + * each. This operation is undoable with the denormalize function. + * + * Pre-conditions: + * + * 1. The input graph is a DAG. + * 2. Each node in the graph has a "rank" property. + * + * Post-condition: + * + * 1. 
All edges in the graph have a length of 1. + * 2. Dummy nodes are added where edges have been split into segments. + * 3. The graph is augmented with a "dummyChains" attribute which contains + * the first dummy in each chain of dummy nodes produced. + */function run(g){g.graph().dummyChains=[];_.forEach(g.edges(),function(edge){normalizeEdge(g,edge)})}function normalizeEdge(g,e){var v=e.v;var vRank=g.node(v).rank;var w=e.w;var wRank=g.node(w).rank;var name=e.name;var edgeLabel=g.edge(e);var labelRank=edgeLabel.labelRank;if(wRank===vRank+1)return;g.removeEdge(e);var dummy,attrs,i;for(i=0,++vRank;vRank0){if(index%2){weightSum+=tree[index+1]}index=index-1>>1;tree[index]+=entry.weight}cc+=entry.weight*weightSum}));return cc}},{"../lodash":10}],17:[function(require,module,exports){"use strict";var _=require("../lodash");var initOrder=require("./init-order");var crossCount=require("./cross-count");var sortSubgraph=require("./sort-subgraph");var buildLayerGraph=require("./build-layer-graph");var addSubgraphConstraints=require("./add-subgraph-constraints");var Graph=require("../graphlib").Graph;var util=require("../util");module.exports=order; +/* + * Applies heuristics to minimize edge crossings in the graph and sets the best + * order solution as an order attribute on each node. + * + * Pre-conditions: + * + * 1. Graph must be DAG + * 2. Graph nodes must be objects with a "rank" attribute + * 3. Graph edges must have the "weight" attribute + * + * Post-conditions: + * + * 1. Graph nodes will have an "order" attribute based on the results of the + * algorithm. 
+ */function order(g){var maxRank=util.maxRank(g),downLayerGraphs=buildLayerGraphs(g,_.range(1,maxRank+1),"inEdges"),upLayerGraphs=buildLayerGraphs(g,_.range(maxRank-1,-1,-1),"outEdges");var layering=initOrder(g);assignOrder(g,layering);var bestCC=Number.POSITIVE_INFINITY,best;for(var i=0,lastBest=0;lastBest<4;++i,++lastBest){sweepLayerGraphs(i%2?downLayerGraphs:upLayerGraphs,i%4>=2);layering=util.buildLayerMatrix(g);var cc=crossCount(g,layering);if(cc=vEntry.barycenter){mergeEntries(vEntry,uEntry)}}}function handleOut(vEntry){return function(wEntry){wEntry["in"].push(vEntry);if(--wEntry.indegree===0){sourceSet.push(wEntry)}}}while(sourceSet.length){var entry=sourceSet.pop();entries.push(entry);_.forEach(entry["in"].reverse(),handleIn(entry));_.forEach(entry.out,handleOut(entry))}return _.map(_.filter(entries,function(entry){return!entry.merged}),function(entry){return _.pick(entry,["vs","i","barycenter","weight"])})}function mergeEntries(target,source){var sum=0;var weight=0;if(target.weight){sum+=target.barycenter*target.weight;weight+=target.weight}if(source.weight){sum+=source.barycenter*source.weight;weight+=source.weight}target.vs=source.vs.concat(target.vs);target.barycenter=sum/weight;target.weight=weight;target.i=Math.min(source.i,target.i);source.merged=true}},{"../lodash":10}],20:[function(require,module,exports){var _=require("../lodash");var barycenter=require("./barycenter");var resolveConflicts=require("./resolve-conflicts");var sort=require("./sort");module.exports=sortSubgraph;function sortSubgraph(g,v,cg,biasRight){var movable=g.children(v);var node=g.node(v);var bl=node?node.borderLeft:undefined;var br=node?node.borderRight:undefined;var subgraphs={};if(bl){movable=_.filter(movable,function(w){return w!==bl&&w!==br})}var barycenters=barycenter(g,movable);_.forEach(barycenters,function(entry){if(g.children(entry.v).length){var 
subgraphResult=sortSubgraph(g,entry.v,cg,biasRight);subgraphs[entry.v]=subgraphResult;if(_.has(subgraphResult,"barycenter")){mergeBarycenters(entry,subgraphResult)}}});var entries=resolveConflicts(barycenters,cg);expandSubgraphs(entries,subgraphs);var result=sort(entries,biasRight);if(bl){result.vs=_.flatten([bl,result.vs,br],true);if(g.predecessors(bl).length){var blPred=g.node(g.predecessors(bl)[0]),brPred=g.node(g.predecessors(br)[0]);if(!_.has(result,"barycenter")){result.barycenter=0;result.weight=0}result.barycenter=(result.barycenter*result.weight+blPred.order+brPred.order)/(result.weight+2);result.weight+=2}}return result}function expandSubgraphs(entries,subgraphs){_.forEach(entries,function(entry){entry.vs=_.flatten(entry.vs.map(function(v){if(subgraphs[v]){return subgraphs[v].vs}return v}),true)})}function mergeBarycenters(target,other){if(!_.isUndefined(target.barycenter)){target.barycenter=(target.barycenter*target.weight+other.barycenter*other.weight)/(target.weight+other.weight);target.weight+=other.weight}else{target.barycenter=other.barycenter;target.weight=other.weight}}},{"../lodash":10,"./barycenter":14,"./resolve-conflicts":19,"./sort":21}],21:[function(require,module,exports){var _=require("../lodash");var util=require("../util");module.exports=sort;function sort(entries,biasRight){var parts=util.partition(entries,function(entry){return _.has(entry,"barycenter")});var sortable=parts.lhs,unsortable=_.sortBy(parts.rhs,function(entry){return-entry.i}),vs=[],sum=0,weight=0,vsIndex=0;sortable.sort(compareWithBias(!!biasRight));vsIndex=consumeUnsortable(vs,unsortable,vsIndex);_.forEach(sortable,function(entry){vsIndex+=entry.vs.length;vs.push(entry.vs);sum+=entry.barycenter*entry.weight;weight+=entry.weight;vsIndex=consumeUnsortable(vs,unsortable,vsIndex)});var result={vs:_.flatten(vs,true)};if(weight){result.barycenter=sum/weight;result.weight=weight}return result}function consumeUnsortable(vs,unsortable,index){var 
last;while(unsortable.length&&(last=_.last(unsortable)).i<=index){unsortable.pop();vs.push(last.vs);index++}return index}function compareWithBias(bias){return function(entryV,entryW){if(entryV.barycenterentryW.barycenter){return 1}return!bias?entryV.i-entryW.i:entryW.i-entryV.i}}},{"../lodash":10,"../util":29}],22:[function(require,module,exports){var _=require("./lodash");module.exports=parentDummyChains;function parentDummyChains(g){var postorderNums=postorder(g);_.forEach(g.graph().dummyChains,function(v){var node=g.node(v);var edgeObj=node.edgeObj;var pathData=findPath(g,postorderNums,edgeObj.v,edgeObj.w);var path=pathData.path;var lca=pathData.lca;var pathIdx=0;var pathV=path[pathIdx];var ascending=true;while(v!==edgeObj.w){node=g.node(v);if(ascending){while((pathV=path[pathIdx])!==lca&&g.node(pathV).maxRanklow||lim>postorderNums[parent].lim));lca=parent; +// Traverse from w to LCA +parent=w;while((parent=g.parent(parent))!==lca){wPath.push(parent)}return{path:vPath.concat(wPath.reverse()),lca:lca}}function postorder(g){var result={};var lim=0;function dfs(v){var low=lim;_.forEach(g.children(v),dfs);result[v]={low:low,lim:lim++}}_.forEach(g.children(),dfs);return result}},{"./lodash":10}],23:[function(require,module,exports){"use strict";var _=require("../lodash");var Graph=require("../graphlib").Graph;var util=require("../util"); +/* + * This module provides coordinate assignment based on Brandes and Köpf, "Fast + * and Simple Horizontal Coordinate Assignment." + */module.exports={positionX:positionX,findType1Conflicts:findType1Conflicts,findType2Conflicts:findType2Conflicts,addConflict:addConflict,hasConflict:hasConflict,verticalAlignment:verticalAlignment,horizontalCompaction:horizontalCompaction,alignCoordinates:alignCoordinates,findSmallestWidthAlignment:findSmallestWidthAlignment,balance:balance}; +/* + * Marks all edges in the graph with a type-1 conflict with the "type1Conflict" + * property. 
A type-1 conflict is one where a non-inner segment crosses an + * inner segment. An inner segment is an edge with both incident nodes marked + * with the "dummy" property. + * + * This algorithm scans layer by layer, starting with the second, for type-1 + * conflicts between the current layer and the previous layer. For each layer + * it scans the nodes from left to right until it reaches one that is incident + * on an inner segment. It then scans predecessors to determine if they have + * edges that cross that inner segment. At the end a final scan is done for all + * nodes on the current rank to see if they cross the last visited inner + * segment. + * + * This algorithm (safely) assumes that a dummy node will only be incident on a + * single node in the layers being scanned. + */function findType1Conflicts(g,layering){var conflicts={};function visitLayer(prevLayer,layer){var +// last visited node in the previous layer that is incident on an inner +// segment. +k0=0, +// Tracks the last node in this layer scanned for crossings with a type-1 +// segment. 
+scanPos=0,prevLayerLength=prevLayer.length,lastNode=_.last(layer);_.forEach(layer,function(v,i){var w=findOtherInnerSegmentNode(g,v),k1=w?g.node(w).order:prevLayerLength;if(w||v===lastNode){_.forEach(layer.slice(scanPos,i+1),function(scanNode){_.forEach(g.predecessors(scanNode),function(u){var uLabel=g.node(u),uPos=uLabel.order;if((uPosnextNorthBorder)){addConflict(conflicts,u,v)}})}})}function visitLayer(north,south){var prevNorthPos=-1,nextNorthPos,southPos=0;_.forEach(south,function(v,southLookahead){if(g.node(v).dummy==="border"){var predecessors=g.predecessors(v);if(predecessors.length){nextNorthPos=g.node(predecessors[0]).order;scan(south,southPos,southLookahead,prevNorthPos,nextNorthPos);southPos=southLookahead;prevNorthPos=nextNorthPos}}scan(south,southPos,south.length,nextNorthPos,north.length)});return south}_.reduce(layering,visitLayer);return conflicts}function findOtherInnerSegmentNode(g,v){if(g.node(v).dummy){return _.find(g.predecessors(v),function(u){return g.node(u).dummy})}}function addConflict(conflicts,v,w){if(v>w){var tmp=v;v=w;w=tmp}var conflictsV=conflicts[v];if(!conflictsV){conflicts[v]=conflictsV={}}conflictsV[w]=true}function hasConflict(conflicts,v,w){if(v>w){var tmp=v;v=w;w=tmp}return _.has(conflicts[v],w)} +/* + * Try to align nodes into vertical "blocks" where possible. This algorithm + * attempts to align a node with one of its median neighbors. If the edge + * connecting a neighbor is a type-1 conflict then we ignore that possibility. + * If a previous node has already formed a block with a node after the node + * we're trying to form a block with, we also ignore that possibility - our + * blocks would be split in that scenario. + */function verticalAlignment(g,layering,conflicts,neighborFn){var root={},align={},pos={}; +// We cache the position here based on the layering because the graph and +// layering may be out of sync. The layering matrix is manipulated to +// generate different extreme alignments. 
+_.forEach(layering,function(layer){_.forEach(layer,function(v,order){root[v]=v;align[v]=v;pos[v]=order})});_.forEach(layering,function(layer){var prevIdx=-1;_.forEach(layer,function(v){var ws=neighborFn(v);if(ws.length){ws=_.sortBy(ws,function(w){return pos[w]});var mp=(ws.length-1)/2;for(var i=Math.floor(mp),il=Math.ceil(mp);i<=il;++i){var w=ws[i];if(align[v]===v&&prevIdxwLabel.lim){tailLabel=wLabel;flip=true}var candidates=_.filter(g.edges(),function(edge){return flip===isDescendant(t,t.node(edge.v),tailLabel)&&flip!==isDescendant(t,t.node(edge.w),tailLabel)});return _.minBy(candidates,function(edge){return slack(g,edge)})}function exchangeEdges(t,g,e,f){var v=e.v;var w=e.w;t.removeEdge(v,w);t.setEdge(f.v,f.w,{});initLowLimValues(t);initCutValues(t,g);updateRanks(t,g)}function updateRanks(t,g){var root=_.find(t.nodes(),function(v){return!g.node(v).parent});var vs=preorder(t,root);vs=vs.slice(1);_.forEach(vs,function(v){var parent=t.node(v).parent,edge=g.edge(v,parent),flipped=false;if(!edge){edge=g.edge(parent,v);flipped=true}g.node(v).rank=g.node(parent).rank+(flipped?edge.minlen:-edge.minlen)})} +/* + * Returns true if the edge is in the tree. + */function isTreeEdge(tree,u,v){return tree.hasEdge(u,v)} +/* + * Returns true if the specified node is descendant of the root node per the + * assigned low and lim attributes in the tree. + */function isDescendant(tree,vLabel,rootLabel){return rootLabel.low<=vLabel.lim&&vLabel.lim<=rootLabel.lim}},{"../graphlib":7,"../lodash":10,"../util":29,"./feasible-tree":25,"./util":28}],28:[function(require,module,exports){"use strict";var _=require("../lodash");module.exports={longestPath:longestPath,slack:slack}; +/* + * Initializes ranks for the input graph using the longest path algorithm. This + * algorithm scales well and is fast in practice, it yields rather poor + * solutions. Nodes are pushed to the lowest layer possible, leaving the bottom + * ranks wide and leaving edges longer than necessary. 
However, due to its + * speed, this algorithm is good for getting an initial ranking that can be fed + * into other algorithms. + * + * This algorithm does not normalize layers because it will be used by other + * algorithms in most cases. If using this algorithm directly, be sure to + * run normalize at the end. + * + * Pre-conditions: + * + * 1. Input graph is a DAG. + * 2. Input graph node labels can be assigned properties. + * + * Post-conditions: + * + * 1. Each node will be assign an (unnormalized) "rank" property. + */function longestPath(g){var visited={};function dfs(v){var label=g.node(v);if(_.has(visited,v)){return label.rank}visited[v]=true;var rank=_.min(_.map(g.outEdges(v),function(e){return dfs(e.w)-g.edge(e).minlen}));if(rank===Number.POSITIVE_INFINITY||// return value of _.map([]) for Lodash 3 +rank===undefined||// return value of _.map([]) for Lodash 4 +rank===null){// return value of _.map([null]) +rank=0}return label.rank=rank}_.forEach(g.sources(),dfs)} +/* + * Returns the amount of slack for the given edge. The slack is defined as the + * difference between the length of the edge and its minimum length. + */function slack(g,e){return g.node(e.w).rank-g.node(e.v).rank-g.edge(e).minlen}},{"../lodash":10}],29:[function(require,module,exports){ +/* eslint "no-console": off */ +"use strict";var _=require("./lodash");var Graph=require("./graphlib").Graph;module.exports={addDummyNode:addDummyNode,simplify:simplify,asNonCompoundGraph:asNonCompoundGraph,successorWeights:successorWeights,predecessorWeights:predecessorWeights,intersectRect:intersectRect,buildLayerMatrix:buildLayerMatrix,normalizeRanks:normalizeRanks,removeEmptyRanks:removeEmptyRanks,addBorderNode:addBorderNode,maxRank:maxRank,partition:partition,time:time,notime:notime}; +/* + * Adds a dummy node to the graph and return v. 
+ */function addDummyNode(g,type,attrs,name){var v;do{v=_.uniqueId(name)}while(g.hasNode(v));attrs.dummy=type;g.setNode(v,attrs);return v} +/* + * Returns a new graph with only simple edges. Handles aggregation of data + * associated with multi-edges. + */function simplify(g){var simplified=(new Graph).setGraph(g.graph());_.forEach(g.nodes(),function(v){simplified.setNode(v,g.node(v))});_.forEach(g.edges(),function(e){var simpleLabel=simplified.edge(e.v,e.w)||{weight:0,minlen:1};var label=g.edge(e);simplified.setEdge(e.v,e.w,{weight:simpleLabel.weight+label.weight,minlen:Math.max(simpleLabel.minlen,label.minlen)})});return simplified}function asNonCompoundGraph(g){var simplified=new Graph({multigraph:g.isMultigraph()}).setGraph(g.graph());_.forEach(g.nodes(),function(v){if(!g.children(v).length){simplified.setNode(v,g.node(v))}});_.forEach(g.edges(),function(e){simplified.setEdge(e,g.edge(e))});return simplified}function successorWeights(g){var weightMap=_.map(g.nodes(),function(v){var sucs={};_.forEach(g.outEdges(v),function(e){sucs[e.w]=(sucs[e.w]||0)+g.edge(e).weight});return sucs});return _.zipObject(g.nodes(),weightMap)}function predecessorWeights(g){var weightMap=_.map(g.nodes(),function(v){var preds={};_.forEach(g.inEdges(v),function(e){preds[e.v]=(preds[e.v]||0)+g.edge(e).weight});return preds});return _.zipObject(g.nodes(),weightMap)} +/* + * Finds where a line starting at point ({x, y}) would intersect a rectangle + * ({x, y, width, height}) if it were pointing at the rectangle's center. + */function intersectRect(rect,point){var x=rect.x;var y=rect.y; +// Rectangle intersection algorithm from: +// http://math.stackexchange.com/questions/108113/find-edge-between-two-boxes +var dx=point.x-x;var dy=point.y-y;var w=rect.width/2;var h=rect.height/2;if(!dx&&!dy){throw new Error("Not possible to find intersection inside of the rectangle")}var sx,sy;if(Math.abs(dy)*w>Math.abs(dx)*h){ +// Intersection is top or bottom of rect. 
+if(dy<0){h=-h}sx=h*dx/dy;sy=h}else{ +// Intersection is left or right of rect. +if(dx<0){w=-w}sx=w;sy=w*dy/dx}return{x:x+sx,y:y+sy}} +/* + * Given a DAG with each node assigned "rank" and "order" properties, this + * function will produce a matrix with the ids of each node. + */function buildLayerMatrix(g){var layering=_.map(_.range(maxRank(g)+1),function(){return[]});_.forEach(g.nodes(),function(v){var node=g.node(v);var rank=node.rank;if(!_.isUndefined(rank)){layering[rank][node.order]=v}});return layering} +/* + * Adjusts the ranks for all nodes in the graph such that all nodes v have + * rank(v) >= 0 and at least one node w has rank(w) = 0. + */function normalizeRanks(g){var min=_.min(_.map(g.nodes(),function(v){return g.node(v).rank}));_.forEach(g.nodes(),function(v){var node=g.node(v);if(_.has(node,"rank")){node.rank-=min}})}function removeEmptyRanks(g){ +// Ranks may not start at 0, so we need to offset them +var offset=_.min(_.map(g.nodes(),function(v){return g.node(v).rank}));var layers=[];_.forEach(g.nodes(),function(v){var rank=g.node(v).rank-offset;if(!layers[rank]){layers[rank]=[]}layers[rank].push(v)});var delta=0;var nodeRankFactor=g.graph().nodeRankFactor;_.forEach(layers,function(vs,i){if(_.isUndefined(vs)&&i%nodeRankFactor!==0){--delta}else if(delta){_.forEach(vs,function(v){g.node(v).rank+=delta})}})}function addBorderNode(g,prefix,rank,order){var node={width:0,height:0};if(arguments.length>=4){node.rank=rank;node.order=order}return addDummyNode(g,"border",node,prefix)}function maxRank(g){return _.max(_.map(g.nodes(),function(v){var rank=g.node(v).rank;if(!_.isUndefined(rank)){return rank}}))} +/* + * Partition a collection into two groups: `lhs` and `rhs`. If the supplied + * function returns true for an entry it goes into `lhs`. Otherwise it goes + * into `rhs. 
+ */function partition(collection,fn){var result={lhs:[],rhs:[]};_.forEach(collection,function(value){if(fn(value)){result.lhs.push(value)}else{result.rhs.push(value)}});return result} +/* + * Returns a new function that wraps `fn` with a timer. The wrapper logs the + * time it takes to execute the function. + */function time(name,fn){var start=_.now();try{return fn()}finally{console.log(name+" time: "+(_.now()-start)+"ms")}}function notime(name,fn){return fn()}},{"./graphlib":7,"./lodash":10}],30:[function(require,module,exports){module.exports="0.8.5"},{}],31:[function(require,module,exports){ +/** + * Copyright (c) 2014, Chris Pettitt + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its contributors + * may be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +var lib=require("./lib");module.exports={Graph:lib.Graph,json:require("./lib/json"),alg:require("./lib/alg"),version:lib.version}},{"./lib":47,"./lib/alg":38,"./lib/json":48}],32:[function(require,module,exports){var _=require("../lodash");module.exports=components;function components(g){var visited={};var cmpts=[];var cmpt;function dfs(v){if(_.has(visited,v))return;visited[v]=true;cmpt.push(v);_.each(g.successors(v),dfs);_.each(g.predecessors(v),dfs)}_.each(g.nodes(),function(v){cmpt=[];dfs(v);if(cmpt.length){cmpts.push(cmpt)}});return cmpts}},{"../lodash":49}],33:[function(require,module,exports){var _=require("../lodash");module.exports=dfs; +/* + * A helper that preforms a pre- or post-order traversal on the input graph + * and returns the nodes in the order they were visited. If the graph is + * undirected then this algorithm will navigate using neighbors. If the graph + * is directed then this algorithm will navigate using successors. + * + * Order must be one of "pre" or "post". 
+ */function dfs(g,vs,order){if(!_.isArray(vs)){vs=[vs]}var navigation=(g.isDirected()?g.successors:g.neighbors).bind(g);var acc=[];var visited={};_.each(vs,function(v){if(!g.hasNode(v)){throw new Error("Graph does not have node: "+v)}doDfs(g,v,order==="post",visited,navigation,acc)});return acc}function doDfs(g,v,postorder,visited,navigation,acc){if(!_.has(visited,v)){visited[v]=true;if(!postorder){acc.push(v)}_.each(navigation(v),function(w){doDfs(g,w,postorder,visited,navigation,acc)});if(postorder){acc.push(v)}}}},{"../lodash":49}],34:[function(require,module,exports){var dijkstra=require("./dijkstra");var _=require("../lodash");module.exports=dijkstraAll;function dijkstraAll(g,weightFunc,edgeFunc){return _.transform(g.nodes(),function(acc,v){acc[v]=dijkstra(g,v,weightFunc,edgeFunc)},{})}},{"../lodash":49,"./dijkstra":35}],35:[function(require,module,exports){var _=require("../lodash");var PriorityQueue=require("../data/priority-queue");module.exports=dijkstra;var DEFAULT_WEIGHT_FUNC=_.constant(1);function dijkstra(g,source,weightFn,edgeFn){return runDijkstra(g,String(source),weightFn||DEFAULT_WEIGHT_FUNC,edgeFn||function(v){return g.outEdges(v)})}function runDijkstra(g,source,weightFn,edgeFn){var results={};var pq=new PriorityQueue;var v,vEntry;var updateNeighbors=function(edge){var w=edge.v!==v?edge.v:edge.w;var wEntry=results[w];var weight=weightFn(edge);var distance=vEntry.distance+weight;if(weight<0){throw new Error("dijkstra does not allow negative edge weights. 
"+"Bad edge: "+edge+" Weight: "+weight)}if(distance0){v=pq.removeMin();vEntry=results[v];if(vEntry.distance===Number.POSITIVE_INFINITY){break}edgeFn(v).forEach(updateNeighbors)}return results}},{"../data/priority-queue":45,"../lodash":49}],36:[function(require,module,exports){var _=require("../lodash");var tarjan=require("./tarjan");module.exports=findCycles;function findCycles(g){return _.filter(tarjan(g),function(cmpt){return cmpt.length>1||cmpt.length===1&&g.hasEdge(cmpt[0],cmpt[0])})}},{"../lodash":49,"./tarjan":43}],37:[function(require,module,exports){var _=require("../lodash");module.exports=floydWarshall;var DEFAULT_WEIGHT_FUNC=_.constant(1);function floydWarshall(g,weightFn,edgeFn){return runFloydWarshall(g,weightFn||DEFAULT_WEIGHT_FUNC,edgeFn||function(v){return g.outEdges(v)})}function runFloydWarshall(g,weightFn,edgeFn){var results={};var nodes=g.nodes();nodes.forEach(function(v){results[v]={};results[v][v]={distance:0};nodes.forEach(function(w){if(v!==w){results[v][w]={distance:Number.POSITIVE_INFINITY}}});edgeFn(v).forEach(function(edge){var w=edge.v===v?edge.w:edge.v;var d=weightFn(edge);results[v][w]={distance:d,predecessor:v}})});nodes.forEach(function(k){var rowK=results[k];nodes.forEach(function(i){var rowI=results[i];nodes.forEach(function(j){var ik=rowI[k];var kj=rowK[j];var ij=rowI[j];var altDistance=ik.distance+kj.distance;if(altDistance0){v=pq.removeMin();if(_.has(parents,v)){result.setEdge(v,parents[v])}else if(init){throw new Error("Input graph is not connected: "+g)}else{init=true}g.nodeEdges(v).forEach(updateNeighbors)}return result}},{"../data/priority-queue":45,"../graph":46,"../lodash":49}],43:[function(require,module,exports){var _=require("../lodash");module.exports=tarjan;function tarjan(g){var index=0;var stack=[];var visited={};// node id -> { onStack, lowlink, index } +var results=[];function dfs(v){var 
entry=visited[v]={onStack:true,lowlink:index,index:index++};stack.push(v);g.successors(v).forEach(function(w){if(!_.has(visited,w)){dfs(w);entry.lowlink=Math.min(entry.lowlink,visited[w].lowlink)}else if(visited[w].onStack){entry.lowlink=Math.min(entry.lowlink,visited[w].index)}});if(entry.lowlink===entry.index){var cmpt=[];var w;do{w=stack.pop();visited[w].onStack=false;cmpt.push(w)}while(v!==w);results.push(cmpt)}}g.nodes().forEach(function(v){if(!_.has(visited,v)){dfs(v)}});return results}},{"../lodash":49}],44:[function(require,module,exports){var _=require("../lodash");module.exports=topsort;topsort.CycleException=CycleException;function topsort(g){var visited={};var stack={};var results=[];function visit(node){if(_.has(stack,node)){throw new CycleException}if(!_.has(visited,node)){stack[node]=true;visited[node]=true;_.each(g.predecessors(node),visit);delete stack[node];results.push(node)}}_.each(g.sinks(),visit);if(_.size(visited)!==g.nodeCount()){throw new CycleException}return results}function CycleException(){}CycleException.prototype=new Error;// must be an instance of Error to pass testing +},{"../lodash":49}],45:[function(require,module,exports){var _=require("../lodash");module.exports=PriorityQueue; +/** + * A min-priority queue data structure. This algorithm is derived from Cormen, + * et al., "Introduction to Algorithms". The basic idea of a min-priority + * queue is that you can efficiently (in O(1) time) get the smallest key in + * the queue. Adding and removing elements takes O(log n) time. A key can + * have its priority decreased in O(log n) time. + */function PriorityQueue(){this._arr=[];this._keyIndices={}} +/** + * Returns the number of elements in the queue. Takes `O(1)` time. + */PriorityQueue.prototype.size=function(){return this._arr.length}; +/** + * Returns the keys that are in the queue. Takes `O(n)` time. 
+ */PriorityQueue.prototype.keys=function(){return this._arr.map(function(x){return x.key})}; +/** + * Returns `true` if **key** is in the queue and `false` if not. + */PriorityQueue.prototype.has=function(key){return _.has(this._keyIndices,key)}; +/** + * Returns the priority for **key**. If **key** is not present in the queue + * then this function returns `undefined`. Takes `O(1)` time. + * + * @param {Object} key + */PriorityQueue.prototype.priority=function(key){var index=this._keyIndices[key];if(index!==undefined){return this._arr[index].priority}}; +/** + * Returns the key for the minimum element in this queue. If the queue is + * empty this function throws an Error. Takes `O(1)` time. + */PriorityQueue.prototype.min=function(){if(this.size()===0){throw new Error("Queue underflow")}return this._arr[0].key}; +/** + * Inserts a new key into the priority queue. If the key already exists in + * the queue this function returns `false`; otherwise it will return `true`. + * Takes `O(n)` time. + * + * @param {Object} key the key to add + * @param {Number} priority the initial priority for the key + */PriorityQueue.prototype.add=function(key,priority){var keyIndices=this._keyIndices;key=String(key);if(!_.has(keyIndices,key)){var arr=this._arr;var index=arr.length;keyIndices[key]=index;arr.push({key:key,priority:priority});this._decrease(index);return true}return false}; +/** + * Removes and returns the smallest key in the queue. Takes `O(log n)` time. + */PriorityQueue.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var min=this._arr.pop();delete this._keyIndices[min.key];this._heapify(0);return min.key}; +/** + * Decreases the priority for **key** to **priority**. If the new priority is + * greater than the previous priority, this function will throw an Error. 
+ * + * @param {Object} key the key for which to raise priority + * @param {Number} priority the new priority for the key + */PriorityQueue.prototype.decrease=function(key,priority){var index=this._keyIndices[key];if(priority>this._arr[index].priority){throw new Error("New priority is greater than current priority. "+"Key: "+key+" Old: "+this._arr[index].priority+" New: "+priority)}this._arr[index].priority=priority;this._decrease(index)};PriorityQueue.prototype._heapify=function(i){var arr=this._arr;var l=2*i;var r=l+1;var largest=i;if(l>1;if(arr[parent].priority label +this._nodes={};if(this._isCompound){ +// v -> parent +this._parent={}; +// v -> children +this._children={};this._children[GRAPH_NODE]={}} +// v -> edgeObj +this._in={}; +// u -> v -> Number +this._preds={}; +// v -> edgeObj +this._out={}; +// v -> w -> Number +this._sucs={}; +// e -> edgeObj +this._edgeObjs={}; +// e -> label +this._edgeLabels={}} +/* Number of nodes in the graph. Should only be changed by the implementation. */Graph.prototype._nodeCount=0; +/* Number of edges in the graph. Should only be changed by the implementation. 
*/Graph.prototype._edgeCount=0; +/* === Graph functions ========= */Graph.prototype.isDirected=function(){return this._isDirected};Graph.prototype.isMultigraph=function(){return this._isMultigraph};Graph.prototype.isCompound=function(){return this._isCompound};Graph.prototype.setGraph=function(label){this._label=label;return this};Graph.prototype.graph=function(){return this._label}; +/* === Node functions ========== */Graph.prototype.setDefaultNodeLabel=function(newDefault){if(!_.isFunction(newDefault)){newDefault=_.constant(newDefault)}this._defaultNodeLabelFn=newDefault;return this};Graph.prototype.nodeCount=function(){return this._nodeCount};Graph.prototype.nodes=function(){return _.keys(this._nodes)};Graph.prototype.sources=function(){var self=this;return _.filter(this.nodes(),function(v){return _.isEmpty(self._in[v])})};Graph.prototype.sinks=function(){var self=this;return _.filter(this.nodes(),function(v){return _.isEmpty(self._out[v])})};Graph.prototype.setNodes=function(vs,value){var args=arguments;var self=this;_.each(vs,function(v){if(args.length>1){self.setNode(v,value)}else{self.setNode(v)}});return this};Graph.prototype.setNode=function(v,value){if(_.has(this._nodes,v)){if(arguments.length>1){this._nodes[v]=value}return this}this._nodes[v]=arguments.length>1?value:this._defaultNodeLabelFn(v);if(this._isCompound){this._parent[v]=GRAPH_NODE;this._children[v]={};this._children[GRAPH_NODE][v]=true}this._in[v]={};this._preds[v]={};this._out[v]={};this._sucs[v]={};++this._nodeCount;return this};Graph.prototype.node=function(v){return this._nodes[v]};Graph.prototype.hasNode=function(v){return _.has(this._nodes,v)};Graph.prototype.removeNode=function(v){var self=this;if(_.has(this._nodes,v)){var removeEdge=function(e){self.removeEdge(self._edgeObjs[e])};delete this._nodes[v];if(this._isCompound){this._removeFromParentsChildList(v);delete this._parent[v];_.each(this.children(v),function(child){self.setParent(child)});delete 
this._children[v]}_.each(_.keys(this._in[v]),removeEdge);delete this._in[v];delete this._preds[v];_.each(_.keys(this._out[v]),removeEdge);delete this._out[v];delete this._sucs[v];--this._nodeCount}return this};Graph.prototype.setParent=function(v,parent){if(!this._isCompound){throw new Error("Cannot set parent in a non-compound graph")}if(_.isUndefined(parent)){parent=GRAPH_NODE}else{ +// Coerce parent to string +parent+="";for(var ancestor=parent;!_.isUndefined(ancestor);ancestor=this.parent(ancestor)){if(ancestor===v){throw new Error("Setting "+parent+" as parent of "+v+" would create a cycle")}}this.setNode(parent)}this.setNode(v);this._removeFromParentsChildList(v);this._parent[v]=parent;this._children[parent][v]=true;return this};Graph.prototype._removeFromParentsChildList=function(v){delete this._children[this._parent[v]][v]};Graph.prototype.parent=function(v){if(this._isCompound){var parent=this._parent[v];if(parent!==GRAPH_NODE){return parent}}};Graph.prototype.children=function(v){if(_.isUndefined(v)){v=GRAPH_NODE}if(this._isCompound){var children=this._children[v];if(children){return _.keys(children)}}else if(v===GRAPH_NODE){return this.nodes()}else if(this.hasNode(v)){return[]}};Graph.prototype.predecessors=function(v){var predsV=this._preds[v];if(predsV){return _.keys(predsV)}};Graph.prototype.successors=function(v){var sucsV=this._sucs[v];if(sucsV){return _.keys(sucsV)}};Graph.prototype.neighbors=function(v){var preds=this.predecessors(v);if(preds){return _.union(preds,this.successors(v))}};Graph.prototype.isLeaf=function(v){var neighbors;if(this.isDirected()){neighbors=this.successors(v)}else{neighbors=this.neighbors(v)}return neighbors.length===0};Graph.prototype.filterNodes=function(filter){var copy=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});copy.setGraph(this.graph());var 
self=this;_.each(this._nodes,function(value,v){if(filter(v)){copy.setNode(v,value)}});_.each(this._edgeObjs,function(e){if(copy.hasNode(e.v)&©.hasNode(e.w)){copy.setEdge(e,self.edge(e))}});var parents={};function findParent(v){var parent=self.parent(v);if(parent===undefined||copy.hasNode(parent)){parents[v]=parent;return parent}else if(parent in parents){return parents[parent]}else{return findParent(parent)}}if(this._isCompound){_.each(copy.nodes(),function(v){copy.setParent(v,findParent(v))})}return copy}; +/* === Edge functions ========== */Graph.prototype.setDefaultEdgeLabel=function(newDefault){if(!_.isFunction(newDefault)){newDefault=_.constant(newDefault)}this._defaultEdgeLabelFn=newDefault;return this};Graph.prototype.edgeCount=function(){return this._edgeCount};Graph.prototype.edges=function(){return _.values(this._edgeObjs)};Graph.prototype.setPath=function(vs,value){var self=this;var args=arguments;_.reduce(vs,function(v,w){if(args.length>1){self.setEdge(v,w,value)}else{self.setEdge(v,w)}return w});return this}; +/* + * setEdge(v, w, [value, [name]]) + * setEdge({ v, w, [name] }, [value]) + */Graph.prototype.setEdge=function(){var v,w,name,value;var valueSpecified=false;var arg0=arguments[0];if(typeof arg0==="object"&&arg0!==null&&"v"in arg0){v=arg0.v;w=arg0.w;name=arg0.name;if(arguments.length===2){value=arguments[1];valueSpecified=true}}else{v=arg0;w=arguments[1];name=arguments[3];if(arguments.length>2){value=arguments[2];valueSpecified=true}}v=""+v;w=""+w;if(!_.isUndefined(name)){name=""+name}var e=edgeArgsToId(this._isDirected,v,w,name);if(_.has(this._edgeLabels,e)){if(valueSpecified){this._edgeLabels[e]=value}return this}if(!_.isUndefined(name)&&!this._isMultigraph){throw new Error("Cannot set a named edge when isMultigraph = false")} +// It didn't exist, so we need to create it. +// First ensure the nodes exist. 
+this.setNode(v);this.setNode(w);this._edgeLabels[e]=valueSpecified?value:this._defaultEdgeLabelFn(v,w,name);var edgeObj=edgeArgsToObj(this._isDirected,v,w,name); +// Ensure we add undirected edges in a consistent way. +v=edgeObj.v;w=edgeObj.w;Object.freeze(edgeObj);this._edgeObjs[e]=edgeObj;incrementOrInitEntry(this._preds[w],v);incrementOrInitEntry(this._sucs[v],w);this._in[w][e]=edgeObj;this._out[v][e]=edgeObj;this._edgeCount++;return this};Graph.prototype.edge=function(v,w,name){var e=arguments.length===1?edgeObjToId(this._isDirected,arguments[0]):edgeArgsToId(this._isDirected,v,w,name);return this._edgeLabels[e]};Graph.prototype.hasEdge=function(v,w,name){var e=arguments.length===1?edgeObjToId(this._isDirected,arguments[0]):edgeArgsToId(this._isDirected,v,w,name);return _.has(this._edgeLabels,e)};Graph.prototype.removeEdge=function(v,w,name){var e=arguments.length===1?edgeObjToId(this._isDirected,arguments[0]):edgeArgsToId(this._isDirected,v,w,name);var edge=this._edgeObjs[e];if(edge){v=edge.v;w=edge.w;delete this._edgeLabels[e];delete this._edgeObjs[e];decrementOrRemoveEntry(this._preds[w],v);decrementOrRemoveEntry(this._sucs[v],w);delete this._in[w][e];delete this._out[v][e];this._edgeCount--}return this};Graph.prototype.inEdges=function(v,u){var inV=this._in[v];if(inV){var edges=_.values(inV);if(!u){return edges}return _.filter(edges,function(edge){return edge.v===u})}};Graph.prototype.outEdges=function(v,w){var outV=this._out[v];if(outV){var edges=_.values(outV);if(!w){return edges}return _.filter(edges,function(edge){return edge.w===w})}};Graph.prototype.nodeEdges=function(v,w){var inEdges=this.inEdges(v,w);if(inEdges){return inEdges.concat(this.outEdges(v,w))}};function incrementOrInitEntry(map,k){if(map[k]){map[k]++}else{map[k]=1}}function decrementOrRemoveEntry(map,k){if(!--map[k]){delete map[k]}}function edgeArgsToId(isDirected,v_,w_,name){var v=""+v_;var w=""+w_;if(!isDirected&&v>w){var tmp=v;v=w;w=tmp}return 
v+EDGE_KEY_DELIM+w+EDGE_KEY_DELIM+(_.isUndefined(name)?DEFAULT_EDGE_NAME:name)}function edgeArgsToObj(isDirected,v_,w_,name){var v=""+v_;var w=""+w_;if(!isDirected&&v>w){var tmp=v;v=w;w=tmp}var edgeObj={v:v,w:w};if(name){edgeObj.name=name}return edgeObj}function edgeObjToId(isDirected,edgeObj){return edgeArgsToId(isDirected,edgeObj.v,edgeObj.w,edgeObj.name)}},{"./lodash":49}],47:[function(require,module,exports){ +// Includes only the "core" of graphlib +module.exports={Graph:require("./graph"),version:require("./version")}},{"./graph":46,"./version":50}],48:[function(require,module,exports){var _=require("./lodash");var Graph=require("./graph");module.exports={write:write,read:read};function write(g){var json={options:{directed:g.isDirected(),multigraph:g.isMultigraph(),compound:g.isCompound()},nodes:writeNodes(g),edges:writeEdges(g)};if(!_.isUndefined(g.graph())){json.value=_.clone(g.graph())}return json}function writeNodes(g){return _.map(g.nodes(),function(v){var nodeValue=g.node(v);var parent=g.parent(v);var node={v:v};if(!_.isUndefined(nodeValue)){node.value=nodeValue}if(!_.isUndefined(parent)){node.parent=parent}return node})}function writeEdges(g){return _.map(g.edges(),function(e){var edgeValue=g.edge(e);var edge={v:e.v,w:e.w};if(!_.isUndefined(e.name)){edge.name=e.name}if(!_.isUndefined(edgeValue)){edge.value=edgeValue}return edge})}function read(json){var g=new Graph(json.options).setGraph(json.value);_.each(json.nodes,function(entry){g.setNode(entry.v,entry.value);if(entry.parent){g.setParent(entry.v,entry.parent)}});_.each(json.edges,function(entry){g.setEdge({v:entry.v,w:entry.w,name:entry.name},entry.value)});return g}},{"./graph":46,"./lodash":49}],49:[function(require,module,exports){ +/* global window */ +var lodash;if(typeof 
require==="function"){try{lodash={clone:require("lodash/clone"),constant:require("lodash/constant"),each:require("lodash/each"),filter:require("lodash/filter"),has:require("lodash/has"),isArray:require("lodash/isArray"),isEmpty:require("lodash/isEmpty"),isFunction:require("lodash/isFunction"),isUndefined:require("lodash/isUndefined"),keys:require("lodash/keys"),map:require("lodash/map"),reduce:require("lodash/reduce"),size:require("lodash/size"),transform:require("lodash/transform"),union:require("lodash/union"),values:require("lodash/values")}}catch(e){ +// continue regardless of error +}}if(!lodash){lodash=window._}module.exports=lodash},{"lodash/clone":226,"lodash/constant":228,"lodash/each":230,"lodash/filter":232,"lodash/has":239,"lodash/isArray":243,"lodash/isEmpty":247,"lodash/isFunction":248,"lodash/isUndefined":258,"lodash/keys":259,"lodash/map":262,"lodash/reduce":274,"lodash/size":275,"lodash/transform":284,"lodash/union":285,"lodash/values":287}],50:[function(require,module,exports){module.exports="2.1.8"},{}],51:[function(require,module,exports){var getNative=require("./_getNative"),root=require("./_root"); +/* Built-in method references that are verified to be native. */var DataView=getNative(root,"DataView");module.exports=DataView},{"./_getNative":163,"./_root":208}],52:[function(require,module,exports){var hashClear=require("./_hashClear"),hashDelete=require("./_hashDelete"),hashGet=require("./_hashGet"),hashHas=require("./_hashHas"),hashSet=require("./_hashSet"); +/** + * Creates a hash object. + * + * @private + * @constructor + * @param {Array} [entries] The key-value pairs to cache. + */function Hash(entries){var index=-1,length=entries==null?0:entries.length;this.clear();while(++index-1}module.exports=arrayIncludes},{"./_baseIndexOf":95}],67:[function(require,module,exports){ +/** + * This function is like `arrayIncludes` except that it accepts a comparator. + * + * @private + * @param {Array} [array] The array to inspect. 
+ * @param {*} target The value to search for. + * @param {Function} comparator The comparator invoked per element. + * @returns {boolean} Returns `true` if `target` is found, else `false`. + */ +function arrayIncludesWith(array,value,comparator){var index=-1,length=array==null?0:array.length;while(++index0&&predicate(value)){if(depth>1){ +// Recursively flatten arrays (susceptible to call stack limits). +baseFlatten(value,depth-1,predicate,isStrict,result)}else{arrayPush(result,value)}}else if(!isStrict){result[result.length]=value}}return result}module.exports=baseFlatten},{"./_arrayPush":70,"./_isFlattenable":180}],87:[function(require,module,exports){var createBaseFor=require("./_createBaseFor"); +/** + * The base implementation of `baseForOwn` which iterates over `object` + * properties returned by `keysFunc` and invokes `iteratee` for each property. + * Iteratee functions may exit iteration early by explicitly returning `false`. + * + * @private + * @param {Object} object The object to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @param {Function} keysFunc The function to get the keys of `object`. + * @returns {Object} Returns `object`. + */var baseFor=createBaseFor();module.exports=baseFor},{"./_createBaseFor":149}],88:[function(require,module,exports){var baseFor=require("./_baseFor"),keys=require("./keys"); +/** + * The base implementation of `_.forOwn` without support for iteratee shorthands. + * + * @private + * @param {Object} object The object to iterate over. + * @param {Function} iteratee The function invoked per iteration. + * @returns {Object} Returns `object`. + */function baseForOwn(object,iteratee){return object&&baseFor(object,iteratee,keys)}module.exports=baseForOwn},{"./_baseFor":87,"./keys":259}],89:[function(require,module,exports){var castPath=require("./_castPath"),toKey=require("./_toKey"); +/** + * The base implementation of `_.get` without support for default values. 
+ * + * @private + * @param {Object} object The object to query. + * @param {Array|string} path The path of the property to get. + * @returns {*} Returns the resolved value. + */function baseGet(object,path){path=castPath(path,object);var index=0,length=path.length;while(object!=null&&indexother}module.exports=baseGt},{}],93:[function(require,module,exports){ +/** Used for built-in method references. */ +var objectProto=Object.prototype; +/** Used to check objects for own properties. */var hasOwnProperty=objectProto.hasOwnProperty; +/** + * The base implementation of `_.has` without support for deep paths. + * + * @private + * @param {Object} [object] The object to query. + * @param {Array|string} key The key to check. + * @returns {boolean} Returns `true` if `key` exists, else `false`. + */function baseHas(object,key){return object!=null&&hasOwnProperty.call(object,key)}module.exports=baseHas},{}],94:[function(require,module,exports){ +/** + * The base implementation of `_.hasIn` without support for deep paths. + * + * @private + * @param {Object} [object] The object to query. + * @param {Array|string} key The key to check. + * @returns {boolean} Returns `true` if `key` exists, else `false`. + */ +function baseHasIn(object,key){return object!=null&&key in Object(object)}module.exports=baseHasIn},{}],95:[function(require,module,exports){var baseFindIndex=require("./_baseFindIndex"),baseIsNaN=require("./_baseIsNaN"),strictIndexOf=require("./_strictIndexOf"); +/** + * The base implementation of `_.indexOf` without `fromIndex` bounds checks. + * + * @private + * @param {Array} array The array to inspect. + * @param {*} value The value to search for. + * @param {number} fromIndex The index to search from. + * @returns {number} Returns the index of the matched value, else `-1`. 
+ */function baseIndexOf(array,value,fromIndex){return value===value?strictIndexOf(array,value,fromIndex):baseFindIndex(array,baseIsNaN,fromIndex)}module.exports=baseIndexOf},{"./_baseFindIndex":85,"./_baseIsNaN":101,"./_strictIndexOf":220}],96:[function(require,module,exports){var baseGetTag=require("./_baseGetTag"),isObjectLike=require("./isObjectLike"); +/** `Object#toString` result references. */var argsTag="[object Arguments]"; +/** + * The base implementation of `_.isArguments`. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an `arguments` object, + */function baseIsArguments(value){return isObjectLike(value)&&baseGetTag(value)==argsTag}module.exports=baseIsArguments},{"./_baseGetTag":91,"./isObjectLike":252}],97:[function(require,module,exports){var baseIsEqualDeep=require("./_baseIsEqualDeep"),isObjectLike=require("./isObjectLike"); +/** + * The base implementation of `_.isEqual` which supports partial comparisons + * and tracks traversed objects. + * + * @private + * @param {*} value The value to compare. + * @param {*} other The other value to compare. + * @param {boolean} bitmask The bitmask flags. + * 1 - Unordered comparison + * 2 - Partial comparison + * @param {Function} [customizer] The function to customize comparisons. + * @param {Object} [stack] Tracks traversed `value` and `other` objects. + * @returns {boolean} Returns `true` if the values are equivalent, else `false`. 
+ */function baseIsEqual(value,other,bitmask,customizer,stack){if(value===other){return true}if(value==null||other==null||!isObjectLike(value)&&!isObjectLike(other)){return value!==value&&other!==other}return baseIsEqualDeep(value,other,bitmask,customizer,baseIsEqual,stack)}module.exports=baseIsEqual},{"./_baseIsEqualDeep":98,"./isObjectLike":252}],98:[function(require,module,exports){var Stack=require("./_Stack"),equalArrays=require("./_equalArrays"),equalByTag=require("./_equalByTag"),equalObjects=require("./_equalObjects"),getTag=require("./_getTag"),isArray=require("./isArray"),isBuffer=require("./isBuffer"),isTypedArray=require("./isTypedArray"); +/** Used to compose bitmasks for value comparisons. */var COMPARE_PARTIAL_FLAG=1; +/** `Object#toString` result references. */var argsTag="[object Arguments]",arrayTag="[object Array]",objectTag="[object Object]"; +/** Used for built-in method references. */var objectProto=Object.prototype; +/** Used to check objects for own properties. */var hasOwnProperty=objectProto.hasOwnProperty; +/** + * A specialized version of `baseIsEqual` for arrays and objects which performs + * deep comparisons and tracks traversed objects enabling objects with circular + * references to be compared. + * + * @private + * @param {Object} object The object to compare. + * @param {Object} other The other object to compare. + * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details. + * @param {Function} customizer The function to customize comparisons. + * @param {Function} equalFunc The function to determine equivalents of values. + * @param {Object} [stack] Tracks traversed `object` and `other` objects. + * @returns {boolean} Returns `true` if the objects are equivalent, else `false`. 
+ */function baseIsEqualDeep(object,other,bitmask,customizer,equalFunc,stack){var objIsArr=isArray(object),othIsArr=isArray(other),objTag=objIsArr?arrayTag:getTag(object),othTag=othIsArr?arrayTag:getTag(other);objTag=objTag==argsTag?objectTag:objTag;othTag=othTag==argsTag?objectTag:othTag;var objIsObj=objTag==objectTag,othIsObj=othTag==objectTag,isSameTag=objTag==othTag;if(isSameTag&&isBuffer(object)){if(!isBuffer(other)){return false}objIsArr=true;objIsObj=false}if(isSameTag&&!objIsObj){stack||(stack=new Stack);return objIsArr||isTypedArray(object)?equalArrays(object,other,bitmask,customizer,equalFunc,stack):equalByTag(object,other,objTag,bitmask,customizer,equalFunc,stack)}if(!(bitmask&COMPARE_PARTIAL_FLAG)){var objIsWrapped=objIsObj&&hasOwnProperty.call(object,"__wrapped__"),othIsWrapped=othIsObj&&hasOwnProperty.call(other,"__wrapped__");if(objIsWrapped||othIsWrapped){var objUnwrapped=objIsWrapped?object.value():object,othUnwrapped=othIsWrapped?other.value():other;stack||(stack=new Stack);return equalFunc(objUnwrapped,othUnwrapped,bitmask,customizer,stack)}}if(!isSameTag){return false}stack||(stack=new Stack);return equalObjects(object,other,bitmask,customizer,equalFunc,stack)}module.exports=baseIsEqualDeep},{"./_Stack":59,"./_equalArrays":154,"./_equalByTag":155,"./_equalObjects":156,"./_getTag":168,"./isArray":243,"./isBuffer":246,"./isTypedArray":257}],99:[function(require,module,exports){var getTag=require("./_getTag"),isObjectLike=require("./isObjectLike"); +/** `Object#toString` result references. */var mapTag="[object Map]"; +/** + * The base implementation of `_.isMap` without Node.js optimizations. + * + * @private + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a map, else `false`. 
+ */function baseIsMap(value){return isObjectLike(value)&&getTag(value)==mapTag}module.exports=baseIsMap},{"./_getTag":168,"./isObjectLike":252}],100:[function(require,module,exports){var Stack=require("./_Stack"),baseIsEqual=require("./_baseIsEqual"); +/** Used to compose bitmasks for value comparisons. */var COMPARE_PARTIAL_FLAG=1,COMPARE_UNORDERED_FLAG=2; +/** + * The base implementation of `_.isMatch` without support for iteratee shorthands. + * + * @private + * @param {Object} object The object to inspect. + * @param {Object} source The object of property values to match. + * @param {Array} matchData The property names, values, and compare flags to match. + * @param {Function} [customizer] The function to customize comparisons. + * @returns {boolean} Returns `true` if `object` is a match, else `false`. + */function baseIsMatch(object,source,matchData,customizer){var index=matchData.length,length=index,noCustomizer=!customizer;if(object==null){return!length}object=Object(object);while(index--){var data=matchData[index];if(noCustomizer&&data[2]?data[1]!==object[data[0]]:!(data[0]in object)){return false}}while(++index=LARGE_ARRAY_SIZE){var set=iteratee?null:createSet(array);if(set){return setToArray(set)}isCommon=false;includes=cacheHas;seen=new SetCache}else{seen=iteratee?[]:result}outer:while(++indexother||valIsSymbol&&othIsDefined&&othIsReflexive&&!othIsNull&&!othIsSymbol||valIsNull&&othIsDefined&&othIsReflexive||!valIsDefined&&othIsReflexive||!valIsReflexive){return 1}if(!valIsNull&&!valIsSymbol&&!othIsSymbol&&value=ordersLength){return result}var order=orders[index];return result*(order=="desc"?-1:1)}} +// Fixes an `Array#sort` bug in the JS engine embedded in Adobe applications +// that causes it, under certain circumstances, to provide the same value for +// `object` and `other`. See https://github.com/jashkenas/underscore/pull/1247 +// for more details. +// +// This also ensures a stable sort in V8 and other engines. 
+// See https://bugs.chromium.org/p/v8/issues/detail?id=90 for more details. +return object.index-other.index}module.exports=compareMultiple},{"./_compareAscending":140}],142:[function(require,module,exports){ +/** + * Copies the values of `source` to `array`. + * + * @private + * @param {Array} source The array to copy values from. + * @param {Array} [array=[]] The array to copy values to. + * @returns {Array} Returns `array`. + */ +function copyArray(source,array){var index=-1,length=source.length;array||(array=Array(length));while(++index1?sources[length-1]:undefined,guard=length>2?sources[2]:undefined;customizer=assigner.length>3&&typeof customizer=="function"?(length--,customizer):undefined;if(guard&&isIterateeCall(sources[0],sources[1],guard)){customizer=length<3?undefined:customizer;length=1}object=Object(object);while(++index-1?iterable[iteratee?collection[index]:index]:undefined}}module.exports=createFind},{"./_baseIteratee":105,"./isArrayLike":244,"./keys":259}],151:[function(require,module,exports){var baseRange=require("./_baseRange"),isIterateeCall=require("./_isIterateeCall"),toFinite=require("./toFinite"); +/** + * Creates a `_.range` or `_.rangeRight` function. + * + * @private + * @param {boolean} [fromRight] Specify iterating from right to left. + * @returns {Function} Returns the new range function. + */function createRange(fromRight){return function(start,end,step){if(step&&typeof step!="number"&&isIterateeCall(start,end,step)){end=step=undefined} +// Ensure the sign of `-0` is preserved. +start=toFinite(start);if(end===undefined){end=start;start=0}else{end=toFinite(end)}step=step===undefined?startarrLength)){return false} +// Assume cyclic values are equal. +var stacked=stack.get(array);if(stacked&&stack.get(other)){return stacked==other}var index=-1,result=true,seen=bitmask&COMPARE_UNORDERED_FLAG?new SetCache:undefined;stack.set(array,other);stack.set(other,array); +// Ignore non-index properties. 
+while(++index-1&&value%1==0&&value-1}module.exports=listCacheHas},{"./_assocIndexOf":76}],192:[function(require,module,exports){var assocIndexOf=require("./_assocIndexOf"); +/** + * Sets the list cache `key` to `value`. + * + * @private + * @name set + * @memberOf ListCache + * @param {string} key The key of the value to set. + * @param {*} value The value to set. + * @returns {Object} Returns the list cache instance. + */function listCacheSet(key,value){var data=this.__data__,index=assocIndexOf(data,key);if(index<0){++this.size;data.push([key,value])}else{data[index][1]=value}return this}module.exports=listCacheSet},{"./_assocIndexOf":76}],193:[function(require,module,exports){var Hash=require("./_Hash"),ListCache=require("./_ListCache"),Map=require("./_Map"); +/** + * Removes all key-value entries from the map. + * + * @private + * @name clear + * @memberOf MapCache + */function mapCacheClear(){this.size=0;this.__data__={hash:new Hash,map:new(Map||ListCache),string:new Hash}}module.exports=mapCacheClear},{"./_Hash":52,"./_ListCache":53,"./_Map":54}],194:[function(require,module,exports){var getMapData=require("./_getMapData"); +/** + * Removes `key` and its value from the map. + * + * @private + * @name delete + * @memberOf MapCache + * @param {string} key The key of the value to remove. + * @returns {boolean} Returns `true` if the entry was removed, else `false`. + */function mapCacheDelete(key){var result=getMapData(this,key)["delete"](key);this.size-=result?1:0;return result}module.exports=mapCacheDelete},{"./_getMapData":161}],195:[function(require,module,exports){var getMapData=require("./_getMapData"); +/** + * Gets the map value for `key`. + * + * @private + * @name get + * @memberOf MapCache + * @param {string} key The key of the value to get. + * @returns {*} Returns the entry value. 
+ */function mapCacheGet(key){return getMapData(this,key).get(key)}module.exports=mapCacheGet},{"./_getMapData":161}],196:[function(require,module,exports){var getMapData=require("./_getMapData"); +/** + * Checks if a map value for `key` exists. + * + * @private + * @name has + * @memberOf MapCache + * @param {string} key The key of the entry to check. + * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. + */function mapCacheHas(key){return getMapData(this,key).has(key)}module.exports=mapCacheHas},{"./_getMapData":161}],197:[function(require,module,exports){var getMapData=require("./_getMapData"); +/** + * Sets the map `key` to `value`. + * + * @private + * @name set + * @memberOf MapCache + * @param {string} key The key of the value to set. + * @param {*} value The value to set. + * @returns {Object} Returns the map cache instance. + */function mapCacheSet(key,value){var data=getMapData(this,key),size=data.size;data.set(key,value);this.size+=data.size==size?0:1;return this}module.exports=mapCacheSet},{"./_getMapData":161}],198:[function(require,module,exports){ +/** + * Converts `map` to its key-value pairs. + * + * @private + * @param {Object} map The map to convert. + * @returns {Array} Returns the key-value pairs. + */ +function mapToArray(map){var index=-1,result=Array(map.size);map.forEach(function(value,key){result[++index]=[key,value]});return result}module.exports=mapToArray},{}],199:[function(require,module,exports){ +/** + * A specialized version of `matchesProperty` for source values suitable + * for strict equality comparisons, i.e. `===`. + * + * @private + * @param {string} key The key of the property to get. + * @param {*} srcValue The value to match. + * @returns {Function} Returns the new spec function. 
+ */ +function matchesStrictComparable(key,srcValue){return function(object){if(object==null){return false}return object[key]===srcValue&&(srcValue!==undefined||key in Object(object))}}module.exports=matchesStrictComparable},{}],200:[function(require,module,exports){var memoize=require("./memoize"); +/** Used as the maximum memoize cache size. */var MAX_MEMOIZE_SIZE=500; +/** + * A specialized version of `_.memoize` which clears the memoized function's + * cache when it exceeds `MAX_MEMOIZE_SIZE`. + * + * @private + * @param {Function} func The function to have its output memoized. + * @returns {Function} Returns the new memoized function. + */function memoizeCapped(func){var result=memoize(func,function(key){if(cache.size===MAX_MEMOIZE_SIZE){cache.clear()}return key});var cache=result.cache;return result}module.exports=memoizeCapped},{"./memoize":265}],201:[function(require,module,exports){var getNative=require("./_getNative"); +/* Built-in method references that are verified to be native. */var nativeCreate=getNative(Object,"create");module.exports=nativeCreate},{"./_getNative":163}],202:[function(require,module,exports){var overArg=require("./_overArg"); +/* Built-in method references for those with the same name as other `lodash` methods. */var nativeKeys=overArg(Object.keys,Object);module.exports=nativeKeys},{"./_overArg":206}],203:[function(require,module,exports){ +/** + * This function is like + * [`Object.keys`](http://ecma-international.org/ecma-262/7.0/#sec-object.keys) + * except that it includes inherited enumerable properties. + * + * @private + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. + */ +function nativeKeysIn(object){var result=[];if(object!=null){for(var key in Object(object)){result.push(key)}}return result}module.exports=nativeKeysIn},{}],204:[function(require,module,exports){var freeGlobal=require("./_freeGlobal"); +/** Detect free variable `exports`. 
*/var freeExports=typeof exports=="object"&&exports&&!exports.nodeType&&exports; +/** Detect free variable `module`. */var freeModule=freeExports&&typeof module=="object"&&module&&!module.nodeType&&module; +/** Detect the popular CommonJS extension `module.exports`. */var moduleExports=freeModule&&freeModule.exports===freeExports; +/** Detect free variable `process` from Node.js. */var freeProcess=moduleExports&&freeGlobal.process; +/** Used to access faster Node.js helpers. */var nodeUtil=function(){try{ +// Use `util.types` for Node.js 10+. +var types=freeModule&&freeModule.require&&freeModule.require("util").types;if(types){return types} +// Legacy `process.binding('util')` for Node.js < 10. +return freeProcess&&freeProcess.binding&&freeProcess.binding("util")}catch(e){}}();module.exports=nodeUtil},{"./_freeGlobal":158}],205:[function(require,module,exports){ +/** Used for built-in method references. */ +var objectProto=Object.prototype; +/** + * Used to resolve the + * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring) + * of values. + */var nativeObjectToString=objectProto.toString; +/** + * Converts `value` to a string using `Object.prototype.toString`. + * + * @private + * @param {*} value The value to convert. + * @returns {string} Returns the converted string. + */function objectToString(value){return nativeObjectToString.call(value)}module.exports=objectToString},{}],206:[function(require,module,exports){ +/** + * Creates a unary function that invokes `func` with its argument transformed. + * + * @private + * @param {Function} func The function to wrap. + * @param {Function} transform The argument transform. + * @returns {Function} Returns the new function. 
+ */ +function overArg(func,transform){return function(arg){return func(transform(arg))}}module.exports=overArg},{}],207:[function(require,module,exports){var apply=require("./_apply"); +/* Built-in method references for those with the same name as other `lodash` methods. */var nativeMax=Math.max; +/** + * A specialized version of `baseRest` which transforms the rest array. + * + * @private + * @param {Function} func The function to apply a rest parameter to. + * @param {number} [start=func.length-1] The start position of the rest parameter. + * @param {Function} transform The rest array transform. + * @returns {Function} Returns the new function. + */function overRest(func,start,transform){start=nativeMax(start===undefined?func.length-1:start,0);return function(){var args=arguments,index=-1,length=nativeMax(args.length-start,0),array=Array(length);while(++index0){if(++count>=HOT_COUNT){return arguments[0]}}else{count=0}return func.apply(undefined,arguments)}}module.exports=shortOut},{}],215:[function(require,module,exports){var ListCache=require("./_ListCache"); +/** + * Removes all key-value entries from the stack. + * + * @private + * @name clear + * @memberOf Stack + */function stackClear(){this.__data__=new ListCache;this.size=0}module.exports=stackClear},{"./_ListCache":53}],216:[function(require,module,exports){ +/** + * Removes `key` and its value from the stack. + * + * @private + * @name delete + * @memberOf Stack + * @param {string} key The key of the value to remove. + * @returns {boolean} Returns `true` if the entry was removed, else `false`. + */ +function stackDelete(key){var data=this.__data__,result=data["delete"](key);this.size=data.size;return result}module.exports=stackDelete},{}],217:[function(require,module,exports){ +/** + * Gets the stack value for `key`. + * + * @private + * @name get + * @memberOf Stack + * @param {string} key The key of the value to get. + * @returns {*} Returns the entry value. 
+ */ +function stackGet(key){return this.__data__.get(key)}module.exports=stackGet},{}],218:[function(require,module,exports){ +/** + * Checks if a stack value for `key` exists. + * + * @private + * @name has + * @memberOf Stack + * @param {string} key The key of the entry to check. + * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`. + */ +function stackHas(key){return this.__data__.has(key)}module.exports=stackHas},{}],219:[function(require,module,exports){var ListCache=require("./_ListCache"),Map=require("./_Map"),MapCache=require("./_MapCache"); +/** Used as the size to enable large array optimizations. */var LARGE_ARRAY_SIZE=200; +/** + * Sets the stack `key` to `value`. + * + * @private + * @name set + * @memberOf Stack + * @param {string} key The key of the value to set. + * @param {*} value The value to set. + * @returns {Object} Returns the stack cache instance. + */function stackSet(key,value){var data=this.__data__;if(data instanceof ListCache){var pairs=data.__data__;if(!Map||pairs.length true + */function clone(value){return baseClone(value,CLONE_SYMBOLS_FLAG)}module.exports=clone},{"./_baseClone":80}],227:[function(require,module,exports){var baseClone=require("./_baseClone"); +/** Used to compose bitmasks for cloning. */var CLONE_DEEP_FLAG=1,CLONE_SYMBOLS_FLAG=4; +/** + * This method is like `_.clone` except that it recursively clones `value`. + * + * @static + * @memberOf _ + * @since 1.0.0 + * @category Lang + * @param {*} value The value to recursively clone. + * @returns {*} Returns the deep cloned value. + * @see _.clone + * @example + * + * var objects = [{ 'a': 1 }, { 'b': 2 }]; + * + * var deep = _.cloneDeep(objects); + * console.log(deep[0] === objects[0]); + * // => false + */function cloneDeep(value){return baseClone(value,CLONE_DEEP_FLAG|CLONE_SYMBOLS_FLAG)}module.exports=cloneDeep},{"./_baseClone":80}],228:[function(require,module,exports){ +/** + * Creates a function that returns `value`. 
+ * + * @static + * @memberOf _ + * @since 2.4.0 + * @category Util + * @param {*} value The value to return from the new function. + * @returns {Function} Returns the new constant function. + * @example + * + * var objects = _.times(2, _.constant({ 'a': 1 })); + * + * console.log(objects); + * // => [{ 'a': 1 }, { 'a': 1 }] + * + * console.log(objects[0] === objects[1]); + * // => true + */ +function constant(value){return function(){return value}}module.exports=constant},{}],229:[function(require,module,exports){var baseRest=require("./_baseRest"),eq=require("./eq"),isIterateeCall=require("./_isIterateeCall"),keysIn=require("./keysIn"); +/** Used for built-in method references. */var objectProto=Object.prototype; +/** Used to check objects for own properties. */var hasOwnProperty=objectProto.hasOwnProperty; +/** + * Assigns own and inherited enumerable string keyed properties of source + * objects to the destination object for all destination properties that + * resolve to `undefined`. Source objects are applied from left to right. + * Once a property is set, additional values of the same property are ignored. + * + * **Note:** This method mutates `object`. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The destination object. + * @param {...Object} [sources] The source objects. + * @returns {Object} Returns `object`. 
+ * @see _.defaultsDeep + * @example + * + * _.defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 }); + * // => { 'a': 1, 'b': 2 } + */var defaults=baseRest(function(object,sources){object=Object(object);var index=-1;var length=sources.length;var guard=length>2?sources[2]:undefined;if(guard&&isIterateeCall(sources[0],sources[1],guard)){length=1}while(++index true + * + * _.eq(object, other); + * // => false + * + * _.eq('a', 'a'); + * // => true + * + * _.eq('a', Object('a')); + * // => false + * + * _.eq(NaN, NaN); + * // => true + */ +function eq(value,other){return value===other||value!==value&&other!==other}module.exports=eq},{}],232:[function(require,module,exports){var arrayFilter=require("./_arrayFilter"),baseFilter=require("./_baseFilter"),baseIteratee=require("./_baseIteratee"),isArray=require("./isArray"); +/** + * Iterates over elements of `collection`, returning an array of all elements + * `predicate` returns truthy for. The predicate is invoked with three + * arguments: (value, index|key, collection). + * + * **Note:** Unlike `_.remove`, this method returns a new array. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new filtered array. + * @see _.reject + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36, 'active': true }, + * { 'user': 'fred', 'age': 40, 'active': false } + * ]; + * + * _.filter(users, function(o) { return !o.active; }); + * // => objects for ['fred'] + * + * // The `_.matches` iteratee shorthand. + * _.filter(users, { 'age': 36, 'active': true }); + * // => objects for ['barney'] + * + * // The `_.matchesProperty` iteratee shorthand. + * _.filter(users, ['active', false]); + * // => objects for ['fred'] + * + * // The `_.property` iteratee shorthand. 
+ * _.filter(users, 'active'); + * // => objects for ['barney'] + */function filter(collection,predicate){var func=isArray(collection)?arrayFilter:baseFilter;return func(collection,baseIteratee(predicate,3))}module.exports=filter},{"./_arrayFilter":65,"./_baseFilter":84,"./_baseIteratee":105,"./isArray":243}],233:[function(require,module,exports){var createFind=require("./_createFind"),findIndex=require("./findIndex"); +/** + * Iterates over elements of `collection`, returning the first element + * `predicate` returns truthy for. The predicate is invoked with three + * arguments: (value, index|key, collection). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to inspect. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param {number} [fromIndex=0] The index to search from. + * @returns {*} Returns the matched element, else `undefined`. + * @example + * + * var users = [ + * { 'user': 'barney', 'age': 36, 'active': true }, + * { 'user': 'fred', 'age': 40, 'active': false }, + * { 'user': 'pebbles', 'age': 1, 'active': true } + * ]; + * + * _.find(users, function(o) { return o.age < 40; }); + * // => object for 'barney' + * + * // The `_.matches` iteratee shorthand. + * _.find(users, { 'age': 1, 'active': true }); + * // => object for 'pebbles' + * + * // The `_.matchesProperty` iteratee shorthand. + * _.find(users, ['active', false]); + * // => object for 'fred' + * + * // The `_.property` iteratee shorthand. + * _.find(users, 'active'); + * // => object for 'barney' + */var find=createFind(findIndex);module.exports=find},{"./_createFind":150,"./findIndex":234}],234:[function(require,module,exports){var baseFindIndex=require("./_baseFindIndex"),baseIteratee=require("./_baseIteratee"),toInteger=require("./toInteger"); +/* Built-in method references for those with the same name as other `lodash` methods. 
*/var nativeMax=Math.max; +/** + * This method is like `_.find` except that it returns the index of the first + * element `predicate` returns truthy for instead of the element itself. + * + * @static + * @memberOf _ + * @since 1.1.0 + * @category Array + * @param {Array} array The array to inspect. + * @param {Function} [predicate=_.identity] The function invoked per iteration. + * @param {number} [fromIndex=0] The index to search from. + * @returns {number} Returns the index of the found element, else `-1`. + * @example + * + * var users = [ + * { 'user': 'barney', 'active': false }, + * { 'user': 'fred', 'active': false }, + * { 'user': 'pebbles', 'active': true } + * ]; + * + * _.findIndex(users, function(o) { return o.user == 'barney'; }); + * // => 0 + * + * // The `_.matches` iteratee shorthand. + * _.findIndex(users, { 'user': 'fred', 'active': false }); + * // => 1 + * + * // The `_.matchesProperty` iteratee shorthand. + * _.findIndex(users, ['active', false]); + * // => 0 + * + * // The `_.property` iteratee shorthand. + * _.findIndex(users, 'active'); + * // => 2 + */function findIndex(array,predicate,fromIndex){var length=array==null?0:array.length;if(!length){return-1}var index=fromIndex==null?0:toInteger(fromIndex);if(index<0){index=nativeMax(length+index,0)}return baseFindIndex(array,baseIteratee(predicate,3),index)}module.exports=findIndex},{"./_baseFindIndex":85,"./_baseIteratee":105,"./toInteger":280}],235:[function(require,module,exports){var baseFlatten=require("./_baseFlatten"); +/** + * Flattens `array` a single level deep. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to flatten. + * @returns {Array} Returns the new flattened array. 
+ * @example + * + * _.flatten([1, [2, [3, [4]], 5]]); + * // => [1, 2, [3, [4]], 5] + */function flatten(array){var length=array==null?0:array.length;return length?baseFlatten(array,1):[]}module.exports=flatten},{"./_baseFlatten":86}],236:[function(require,module,exports){var arrayEach=require("./_arrayEach"),baseEach=require("./_baseEach"),castFunction=require("./_castFunction"),isArray=require("./isArray"); +/** + * Iterates over elements of `collection` and invokes `iteratee` for each element. + * The iteratee is invoked with three arguments: (value, index|key, collection). + * Iteratee functions may exit iteration early by explicitly returning `false`. + * + * **Note:** As with other "Collections" methods, objects with a "length" + * property are iterated like arrays. To avoid this behavior use `_.forIn` + * or `_.forOwn` for object iteration. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @alias each + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Array|Object} Returns `collection`. + * @see _.forEachRight + * @example + * + * _.forEach([1, 2], function(value) { + * console.log(value); + * }); + * // => Logs `1` then `2`. + * + * _.forEach({ 'a': 1, 'b': 2 }, function(value, key) { + * console.log(key); + * }); + * // => Logs 'a' then 'b' (iteration order is not guaranteed). + */function forEach(collection,iteratee){var func=isArray(collection)?arrayEach:baseEach;return func(collection,castFunction(iteratee))}module.exports=forEach},{"./_arrayEach":64,"./_baseEach":82,"./_castFunction":132,"./isArray":243}],237:[function(require,module,exports){var baseFor=require("./_baseFor"),castFunction=require("./_castFunction"),keysIn=require("./keysIn"); +/** + * Iterates over own and inherited enumerable string keyed properties of an + * object and invokes `iteratee` for each property. 
The iteratee is invoked + * with three arguments: (value, key, object). Iteratee functions may exit + * iteration early by explicitly returning `false`. + * + * @static + * @memberOf _ + * @since 0.3.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns `object`. + * @see _.forInRight + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.forIn(new Foo, function(value, key) { + * console.log(key); + * }); + * // => Logs 'a', 'b', then 'c' (iteration order is not guaranteed). + */function forIn(object,iteratee){return object==null?object:baseFor(object,castFunction(iteratee),keysIn)}module.exports=forIn},{"./_baseFor":87,"./_castFunction":132,"./keysIn":260}],238:[function(require,module,exports){var baseGet=require("./_baseGet"); +/** + * Gets the value at `path` of `object`. If the resolved value is + * `undefined`, the `defaultValue` is returned in its place. + * + * @static + * @memberOf _ + * @since 3.7.0 + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path of the property to get. + * @param {*} [defaultValue] The value returned for `undefined` resolved values. + * @returns {*} Returns the resolved value. + * @example + * + * var object = { 'a': [{ 'b': { 'c': 3 } }] }; + * + * _.get(object, 'a[0].b.c'); + * // => 3 + * + * _.get(object, ['a', '0', 'b', 'c']); + * // => 3 + * + * _.get(object, 'a.b.c', 'default'); + * // => 'default' + */function get(object,path,defaultValue){var result=object==null?undefined:baseGet(object,path);return result===undefined?defaultValue:result}module.exports=get},{"./_baseGet":89}],239:[function(require,module,exports){var baseHas=require("./_baseHas"),hasPath=require("./_hasPath"); +/** + * Checks if `path` is a direct property of `object`. 
+ * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path to check. + * @returns {boolean} Returns `true` if `path` exists, else `false`. + * @example + * + * var object = { 'a': { 'b': 2 } }; + * var other = _.create({ 'a': _.create({ 'b': 2 }) }); + * + * _.has(object, 'a'); + * // => true + * + * _.has(object, 'a.b'); + * // => true + * + * _.has(object, ['a', 'b']); + * // => true + * + * _.has(other, 'a'); + * // => false + */function has(object,path){return object!=null&&hasPath(object,path,baseHas)}module.exports=has},{"./_baseHas":93,"./_hasPath":170}],240:[function(require,module,exports){var baseHasIn=require("./_baseHasIn"),hasPath=require("./_hasPath"); +/** + * Checks if `path` is a direct or inherited property of `object`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Object + * @param {Object} object The object to query. + * @param {Array|string} path The path to check. + * @returns {boolean} Returns `true` if `path` exists, else `false`. + * @example + * + * var object = _.create({ 'a': _.create({ 'b': 2 }) }); + * + * _.hasIn(object, 'a'); + * // => true + * + * _.hasIn(object, 'a.b'); + * // => true + * + * _.hasIn(object, ['a', 'b']); + * // => true + * + * _.hasIn(object, 'b'); + * // => false + */function hasIn(object,path){return object!=null&&hasPath(object,path,baseHasIn)}module.exports=hasIn},{"./_baseHasIn":94,"./_hasPath":170}],241:[function(require,module,exports){ +/** + * This method returns the first argument it receives. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Util + * @param {*} value Any value. + * @returns {*} Returns `value`. 
+ * @example + * + * var object = { 'a': 1 }; + * + * console.log(_.identity(object) === object); + * // => true + */ +function identity(value){return value}module.exports=identity},{}],242:[function(require,module,exports){var baseIsArguments=require("./_baseIsArguments"),isObjectLike=require("./isObjectLike"); +/** Used for built-in method references. */var objectProto=Object.prototype; +/** Used to check objects for own properties. */var hasOwnProperty=objectProto.hasOwnProperty; +/** Built-in value references. */var propertyIsEnumerable=objectProto.propertyIsEnumerable; +/** + * Checks if `value` is likely an `arguments` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an `arguments` object, + * else `false`. + * @example + * + * _.isArguments(function() { return arguments; }()); + * // => true + * + * _.isArguments([1, 2, 3]); + * // => false + */var isArguments=baseIsArguments(function(){return arguments}())?baseIsArguments:function(value){return isObjectLike(value)&&hasOwnProperty.call(value,"callee")&&!propertyIsEnumerable.call(value,"callee")};module.exports=isArguments},{"./_baseIsArguments":96,"./isObjectLike":252}],243:[function(require,module,exports){ +/** + * Checks if `value` is classified as an `Array` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an array, else `false`. + * @example + * + * _.isArray([1, 2, 3]); + * // => true + * + * _.isArray(document.body.children); + * // => false + * + * _.isArray('abc'); + * // => false + * + * _.isArray(_.noop); + * // => false + */ +var isArray=Array.isArray;module.exports=isArray},{}],244:[function(require,module,exports){var isFunction=require("./isFunction"),isLength=require("./isLength"); +/** + * Checks if `value` is array-like. 
A value is considered array-like if it's + * not a function and has a `value.length` that's an integer greater than or + * equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is array-like, else `false`. + * @example + * + * _.isArrayLike([1, 2, 3]); + * // => true + * + * _.isArrayLike(document.body.children); + * // => true + * + * _.isArrayLike('abc'); + * // => true + * + * _.isArrayLike(_.noop); + * // => false + */function isArrayLike(value){return value!=null&&isLength(value.length)&&!isFunction(value)}module.exports=isArrayLike},{"./isFunction":248,"./isLength":249}],245:[function(require,module,exports){var isArrayLike=require("./isArrayLike"),isObjectLike=require("./isObjectLike"); +/** + * This method is like `_.isArrayLike` except that it also checks if `value` + * is an object. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an array-like object, + * else `false`. + * @example + * + * _.isArrayLikeObject([1, 2, 3]); + * // => true + * + * _.isArrayLikeObject(document.body.children); + * // => true + * + * _.isArrayLikeObject('abc'); + * // => false + * + * _.isArrayLikeObject(_.noop); + * // => false + */function isArrayLikeObject(value){return isObjectLike(value)&&isArrayLike(value)}module.exports=isArrayLikeObject},{"./isArrayLike":244,"./isObjectLike":252}],246:[function(require,module,exports){var root=require("./_root"),stubFalse=require("./stubFalse"); +/** Detect free variable `exports`. */var freeExports=typeof exports=="object"&&exports&&!exports.nodeType&&exports; +/** Detect free variable `module`. */var freeModule=freeExports&&typeof module=="object"&&module&&!module.nodeType&&module; +/** Detect the popular CommonJS extension `module.exports`. 
*/var moduleExports=freeModule&&freeModule.exports===freeExports; +/** Built-in value references. */var Buffer=moduleExports?root.Buffer:undefined; +/* Built-in method references for those with the same name as other `lodash` methods. */var nativeIsBuffer=Buffer?Buffer.isBuffer:undefined; +/** + * Checks if `value` is a buffer. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a buffer, else `false`. + * @example + * + * _.isBuffer(new Buffer(2)); + * // => true + * + * _.isBuffer(new Uint8Array(2)); + * // => false + */var isBuffer=nativeIsBuffer||stubFalse;module.exports=isBuffer},{"./_root":208,"./stubFalse":278}],247:[function(require,module,exports){var baseKeys=require("./_baseKeys"),getTag=require("./_getTag"),isArguments=require("./isArguments"),isArray=require("./isArray"),isArrayLike=require("./isArrayLike"),isBuffer=require("./isBuffer"),isPrototype=require("./_isPrototype"),isTypedArray=require("./isTypedArray"); +/** `Object#toString` result references. */var mapTag="[object Map]",setTag="[object Set]"; +/** Used for built-in method references. */var objectProto=Object.prototype; +/** Used to check objects for own properties. */var hasOwnProperty=objectProto.hasOwnProperty; +/** + * Checks if `value` is an empty object, collection, map, or set. + * + * Objects are considered empty if they have no own enumerable string keyed + * properties. + * + * Array-like values such as `arguments` objects, arrays, buffers, strings, or + * jQuery-like collections are considered empty if they have a `length` of `0`. + * Similarly, maps and sets are considered empty if they have a `size` of `0`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is empty, else `false`. 
+ * @example + * + * _.isEmpty(null); + * // => true + * + * _.isEmpty(true); + * // => true + * + * _.isEmpty(1); + * // => true + * + * _.isEmpty([1, 2, 3]); + * // => false + * + * _.isEmpty({ 'a': 1 }); + * // => false + */function isEmpty(value){if(value==null){return true}if(isArrayLike(value)&&(isArray(value)||typeof value=="string"||typeof value.splice=="function"||isBuffer(value)||isTypedArray(value)||isArguments(value))){return!value.length}var tag=getTag(value);if(tag==mapTag||tag==setTag){return!value.size}if(isPrototype(value)){return!baseKeys(value).length}for(var key in value){if(hasOwnProperty.call(value,key)){return false}}return true}module.exports=isEmpty},{"./_baseKeys":106,"./_getTag":168,"./_isPrototype":186,"./isArguments":242,"./isArray":243,"./isArrayLike":244,"./isBuffer":246,"./isTypedArray":257}],248:[function(require,module,exports){var baseGetTag=require("./_baseGetTag"),isObject=require("./isObject"); +/** `Object#toString` result references. */var asyncTag="[object AsyncFunction]",funcTag="[object Function]",genTag="[object GeneratorFunction]",proxyTag="[object Proxy]"; +/** + * Checks if `value` is classified as a `Function` object. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a function, else `false`. + * @example + * + * _.isFunction(_); + * // => true + * + * _.isFunction(/abc/); + * // => false + */function isFunction(value){if(!isObject(value)){return false} +// The use of `Object#toString` avoids issues with the `typeof` operator +// in Safari 9 which returns 'object' for typed arrays and other constructors. +var tag=baseGetTag(value);return tag==funcTag||tag==genTag||tag==asyncTag||tag==proxyTag}module.exports=isFunction},{"./_baseGetTag":91,"./isObject":251}],249:[function(require,module,exports){ +/** Used as references for various `Number` constants. 
*/ +var MAX_SAFE_INTEGER=9007199254740991; +/** + * Checks if `value` is a valid array-like length. + * + * **Note:** This method is loosely based on + * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a valid length, else `false`. + * @example + * + * _.isLength(3); + * // => true + * + * _.isLength(Number.MIN_VALUE); + * // => false + * + * _.isLength(Infinity); + * // => false + * + * _.isLength('3'); + * // => false + */function isLength(value){return typeof value=="number"&&value>-1&&value%1==0&&value<=MAX_SAFE_INTEGER}module.exports=isLength},{}],250:[function(require,module,exports){var baseIsMap=require("./_baseIsMap"),baseUnary=require("./_baseUnary"),nodeUtil=require("./_nodeUtil"); +/* Node.js helper references. */var nodeIsMap=nodeUtil&&nodeUtil.isMap; +/** + * Checks if `value` is classified as a `Map` object. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a map, else `false`. + * @example + * + * _.isMap(new Map); + * // => true + * + * _.isMap(new WeakMap); + * // => false + */var isMap=nodeIsMap?baseUnary(nodeIsMap):baseIsMap;module.exports=isMap},{"./_baseIsMap":99,"./_baseUnary":127,"./_nodeUtil":204}],251:[function(require,module,exports){ +/** + * Checks if `value` is the + * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types) + * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`) + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is an object, else `false`. 
+ * @example + * + * _.isObject({}); + * // => true + * + * _.isObject([1, 2, 3]); + * // => true + * + * _.isObject(_.noop); + * // => true + * + * _.isObject(null); + * // => false + */ +function isObject(value){var type=typeof value;return value!=null&&(type=="object"||type=="function")}module.exports=isObject},{}],252:[function(require,module,exports){ +/** + * Checks if `value` is object-like. A value is object-like if it's not `null` + * and has a `typeof` result of "object". + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is object-like, else `false`. + * @example + * + * _.isObjectLike({}); + * // => true + * + * _.isObjectLike([1, 2, 3]); + * // => true + * + * _.isObjectLike(_.noop); + * // => false + * + * _.isObjectLike(null); + * // => false + */ +function isObjectLike(value){return value!=null&&typeof value=="object"}module.exports=isObjectLike},{}],253:[function(require,module,exports){var baseGetTag=require("./_baseGetTag"),getPrototype=require("./_getPrototype"),isObjectLike=require("./isObjectLike"); +/** `Object#toString` result references. */var objectTag="[object Object]"; +/** Used for built-in method references. */var funcProto=Function.prototype,objectProto=Object.prototype; +/** Used to resolve the decompiled source of functions. */var funcToString=funcProto.toString; +/** Used to check objects for own properties. */var hasOwnProperty=objectProto.hasOwnProperty; +/** Used to infer the `Object` constructor. */var objectCtorString=funcToString.call(Object); +/** + * Checks if `value` is a plain object, that is, an object created by the + * `Object` constructor or one with a `[[Prototype]]` of `null`. + * + * @static + * @memberOf _ + * @since 0.8.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a plain object, else `false`. 
+ * @example + * + * function Foo() { + * this.a = 1; + * } + * + * _.isPlainObject(new Foo); + * // => false + * + * _.isPlainObject([1, 2, 3]); + * // => false + * + * _.isPlainObject({ 'x': 0, 'y': 0 }); + * // => true + * + * _.isPlainObject(Object.create(null)); + * // => true + */function isPlainObject(value){if(!isObjectLike(value)||baseGetTag(value)!=objectTag){return false}var proto=getPrototype(value);if(proto===null){return true}var Ctor=hasOwnProperty.call(proto,"constructor")&&proto.constructor;return typeof Ctor=="function"&&Ctor instanceof Ctor&&funcToString.call(Ctor)==objectCtorString}module.exports=isPlainObject},{"./_baseGetTag":91,"./_getPrototype":164,"./isObjectLike":252}],254:[function(require,module,exports){var baseIsSet=require("./_baseIsSet"),baseUnary=require("./_baseUnary"),nodeUtil=require("./_nodeUtil"); +/* Node.js helper references. */var nodeIsSet=nodeUtil&&nodeUtil.isSet; +/** + * Checks if `value` is classified as a `Set` object. + * + * @static + * @memberOf _ + * @since 4.3.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a set, else `false`. + * @example + * + * _.isSet(new Set); + * // => true + * + * _.isSet(new WeakSet); + * // => false + */var isSet=nodeIsSet?baseUnary(nodeIsSet):baseIsSet;module.exports=isSet},{"./_baseIsSet":103,"./_baseUnary":127,"./_nodeUtil":204}],255:[function(require,module,exports){var baseGetTag=require("./_baseGetTag"),isArray=require("./isArray"),isObjectLike=require("./isObjectLike"); +/** `Object#toString` result references. */var stringTag="[object String]"; +/** + * Checks if `value` is classified as a `String` primitive or object. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a string, else `false`. 
+ * @example + * + * _.isString('abc'); + * // => true + * + * _.isString(1); + * // => false + */function isString(value){return typeof value=="string"||!isArray(value)&&isObjectLike(value)&&baseGetTag(value)==stringTag}module.exports=isString},{"./_baseGetTag":91,"./isArray":243,"./isObjectLike":252}],256:[function(require,module,exports){var baseGetTag=require("./_baseGetTag"),isObjectLike=require("./isObjectLike"); +/** `Object#toString` result references. */var symbolTag="[object Symbol]"; +/** + * Checks if `value` is classified as a `Symbol` primitive or object. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a symbol, else `false`. + * @example + * + * _.isSymbol(Symbol.iterator); + * // => true + * + * _.isSymbol('abc'); + * // => false + */function isSymbol(value){return typeof value=="symbol"||isObjectLike(value)&&baseGetTag(value)==symbolTag}module.exports=isSymbol},{"./_baseGetTag":91,"./isObjectLike":252}],257:[function(require,module,exports){var baseIsTypedArray=require("./_baseIsTypedArray"),baseUnary=require("./_baseUnary"),nodeUtil=require("./_nodeUtil"); +/* Node.js helper references. */var nodeIsTypedArray=nodeUtil&&nodeUtil.isTypedArray; +/** + * Checks if `value` is classified as a typed array. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is a typed array, else `false`. + * @example + * + * _.isTypedArray(new Uint8Array); + * // => true + * + * _.isTypedArray([]); + * // => false + */var isTypedArray=nodeIsTypedArray?baseUnary(nodeIsTypedArray):baseIsTypedArray;module.exports=isTypedArray},{"./_baseIsTypedArray":104,"./_baseUnary":127,"./_nodeUtil":204}],258:[function(require,module,exports){ +/** + * Checks if `value` is `undefined`. 
+ * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Lang + * @param {*} value The value to check. + * @returns {boolean} Returns `true` if `value` is `undefined`, else `false`. + * @example + * + * _.isUndefined(void 0); + * // => true + * + * _.isUndefined(null); + * // => false + */ +function isUndefined(value){return value===undefined}module.exports=isUndefined},{}],259:[function(require,module,exports){var arrayLikeKeys=require("./_arrayLikeKeys"),baseKeys=require("./_baseKeys"),isArrayLike=require("./isArrayLike"); +/** + * Creates an array of the own enumerable property names of `object`. + * + * **Note:** Non-object values are coerced to objects. See the + * [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys) + * for more details. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.keys(new Foo); + * // => ['a', 'b'] (iteration order is not guaranteed) + * + * _.keys('hi'); + * // => ['0', '1'] + */function keys(object){return isArrayLike(object)?arrayLikeKeys(object):baseKeys(object)}module.exports=keys},{"./_arrayLikeKeys":68,"./_baseKeys":106,"./isArrayLike":244}],260:[function(require,module,exports){var arrayLikeKeys=require("./_arrayLikeKeys"),baseKeysIn=require("./_baseKeysIn"),isArrayLike=require("./isArrayLike"); +/** + * Creates an array of the own and inherited enumerable property names of `object`. + * + * **Note:** Non-object values are coerced to objects. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property names. 
+ * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.keysIn(new Foo); + * // => ['a', 'b', 'c'] (iteration order is not guaranteed) + */function keysIn(object){return isArrayLike(object)?arrayLikeKeys(object,true):baseKeysIn(object)}module.exports=keysIn},{"./_arrayLikeKeys":68,"./_baseKeysIn":107,"./isArrayLike":244}],261:[function(require,module,exports){ +/** + * Gets the last element of `array`. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {Array} array The array to query. + * @returns {*} Returns the last element of `array`. + * @example + * + * _.last([1, 2, 3]); + * // => 3 + */ +function last(array){var length=array==null?0:array.length;return length?array[length-1]:undefined}module.exports=last},{}],262:[function(require,module,exports){var arrayMap=require("./_arrayMap"),baseIteratee=require("./_baseIteratee"),baseMap=require("./_baseMap"),isArray=require("./isArray"); +/** + * Creates an array of values by running each element in `collection` thru + * `iteratee`. The iteratee is invoked with three arguments: + * (value, index|key, collection). + * + * Many lodash methods are guarded to work as iteratees for methods like + * `_.every`, `_.filter`, `_.map`, `_.mapValues`, `_.reject`, and `_.some`. + * + * The guarded methods are: + * `ary`, `chunk`, `curry`, `curryRight`, `drop`, `dropRight`, `every`, + * `fill`, `invert`, `parseInt`, `random`, `range`, `rangeRight`, `repeat`, + * `sampleSize`, `slice`, `some`, `sortBy`, `split`, `take`, `takeRight`, + * `template`, `trim`, `trimEnd`, `trimStart`, and `words` + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Array} Returns the new mapped array. 
+ * @example + * + * function square(n) { + * return n * n; + * } + * + * _.map([4, 8], square); + * // => [16, 64] + * + * _.map({ 'a': 4, 'b': 8 }, square); + * // => [16, 64] (iteration order is not guaranteed) + * + * var users = [ + * { 'user': 'barney' }, + * { 'user': 'fred' } + * ]; + * + * // The `_.property` iteratee shorthand. + * _.map(users, 'user'); + * // => ['barney', 'fred'] + */function map(collection,iteratee){var func=isArray(collection)?arrayMap:baseMap;return func(collection,baseIteratee(iteratee,3))}module.exports=map},{"./_arrayMap":69,"./_baseIteratee":105,"./_baseMap":109,"./isArray":243}],263:[function(require,module,exports){var baseAssignValue=require("./_baseAssignValue"),baseForOwn=require("./_baseForOwn"),baseIteratee=require("./_baseIteratee"); +/** + * Creates an object with the same keys as `object` and values generated + * by running each own enumerable string keyed property of `object` thru + * `iteratee`. The iteratee is invoked with three arguments: + * (value, key, object). + * + * @static + * @memberOf _ + * @since 2.4.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @returns {Object} Returns the new mapped object. + * @see _.mapKeys + * @example + * + * var users = { + * 'fred': { 'user': 'fred', 'age': 40 }, + * 'pebbles': { 'user': 'pebbles', 'age': 1 } + * }; + * + * _.mapValues(users, function(o) { return o.age; }); + * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) + * + * // The `_.property` iteratee shorthand. 
+ * _.mapValues(users, 'age'); + * // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed) + */function mapValues(object,iteratee){var result={};iteratee=baseIteratee(iteratee,3);baseForOwn(object,function(value,key,object){baseAssignValue(result,key,iteratee(value,key,object))});return result}module.exports=mapValues},{"./_baseAssignValue":79,"./_baseForOwn":88,"./_baseIteratee":105}],264:[function(require,module,exports){var baseExtremum=require("./_baseExtremum"),baseGt=require("./_baseGt"),identity=require("./identity"); +/** + * Computes the maximum value of `array`. If `array` is empty or falsey, + * `undefined` is returned. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Math + * @param {Array} array The array to iterate over. + * @returns {*} Returns the maximum value. + * @example + * + * _.max([4, 2, 8, 6]); + * // => 8 + * + * _.max([]); + * // => undefined + */function max(array){return array&&array.length?baseExtremum(array,identity,baseGt):undefined}module.exports=max},{"./_baseExtremum":83,"./_baseGt":92,"./identity":241}],265:[function(require,module,exports){var MapCache=require("./_MapCache"); +/** Error message constants. */var FUNC_ERROR_TEXT="Expected a function"; +/** + * Creates a function that memoizes the result of `func`. If `resolver` is + * provided, it determines the cache key for storing the result based on the + * arguments provided to the memoized function. By default, the first argument + * provided to the memoized function is used as the map cache key. The `func` + * is invoked with the `this` binding of the memoized function. + * + * **Note:** The cache is exposed as the `cache` property on the memoized + * function. Its creation may be customized by replacing the `_.memoize.Cache` + * constructor with one whose instances implement the + * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object) + * method interface of `clear`, `delete`, `get`, `has`, and `set`. 
+ * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Function + * @param {Function} func The function to have its output memoized. + * @param {Function} [resolver] The function to resolve the cache key. + * @returns {Function} Returns the new memoized function. + * @example + * + * var object = { 'a': 1, 'b': 2 }; + * var other = { 'c': 3, 'd': 4 }; + * + * var values = _.memoize(_.values); + * values(object); + * // => [1, 2] + * + * values(other); + * // => [3, 4] + * + * object.a = 2; + * values(object); + * // => [1, 2] + * + * // Modify the result cache. + * values.cache.set(object, ['a', 'b']); + * values(object); + * // => ['a', 'b'] + * + * // Replace `_.memoize.Cache`. + * _.memoize.Cache = WeakMap; + */function memoize(func,resolver){if(typeof func!="function"||resolver!=null&&typeof resolver!="function"){throw new TypeError(FUNC_ERROR_TEXT)}var memoized=function(){var args=arguments,key=resolver?resolver.apply(this,args):args[0],cache=memoized.cache;if(cache.has(key)){return cache.get(key)}var result=func.apply(this,args);memoized.cache=cache.set(key,result)||cache;return result};memoized.cache=new(memoize.Cache||MapCache);return memoized} +// Expose `MapCache`. +memoize.Cache=MapCache;module.exports=memoize},{"./_MapCache":55}],266:[function(require,module,exports){var baseMerge=require("./_baseMerge"),createAssigner=require("./_createAssigner"); +/** + * This method is like `_.assign` except that it recursively merges own and + * inherited enumerable string keyed properties of source objects into the + * destination object. Source properties that resolve to `undefined` are + * skipped if a destination value exists. Array and plain object properties + * are merged recursively. Other objects and value types are overridden by + * assignment. Source objects are applied from left to right. Subsequent + * sources overwrite property assignments of previous sources. + * + * **Note:** This method mutates `object`. 
+ * + * @static + * @memberOf _ + * @since 0.5.0 + * @category Object + * @param {Object} object The destination object. + * @param {...Object} [sources] The source objects. + * @returns {Object} Returns `object`. + * @example + * + * var object = { + * 'a': [{ 'b': 2 }, { 'd': 4 }] + * }; + * + * var other = { + * 'a': [{ 'c': 3 }, { 'e': 5 }] + * }; + * + * _.merge(object, other); + * // => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] } + */var merge=createAssigner(function(object,source,srcIndex){baseMerge(object,source,srcIndex)});module.exports=merge},{"./_baseMerge":112,"./_createAssigner":147}],267:[function(require,module,exports){var baseExtremum=require("./_baseExtremum"),baseLt=require("./_baseLt"),identity=require("./identity"); +/** + * Computes the minimum value of `array`. If `array` is empty or falsey, + * `undefined` is returned. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Math + * @param {Array} array The array to iterate over. + * @returns {*} Returns the minimum value. + * @example + * + * _.min([4, 2, 8, 6]); + * // => 2 + * + * _.min([]); + * // => undefined + */function min(array){return array&&array.length?baseExtremum(array,identity,baseLt):undefined}module.exports=min},{"./_baseExtremum":83,"./_baseLt":108,"./identity":241}],268:[function(require,module,exports){var baseExtremum=require("./_baseExtremum"),baseIteratee=require("./_baseIteratee"),baseLt=require("./_baseLt"); +/** + * This method is like `_.min` except that it accepts `iteratee` which is + * invoked for each element in `array` to generate the criterion by which + * the value is ranked. The iteratee is invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Math + * @param {Array} array The array to iterate over. + * @param {Function} [iteratee=_.identity] The iteratee invoked per element. + * @returns {*} Returns the minimum value. 
+ * @example + * + * var objects = [{ 'n': 1 }, { 'n': 2 }]; + * + * _.minBy(objects, function(o) { return o.n; }); + * // => { 'n': 1 } + * + * // The `_.property` iteratee shorthand. + * _.minBy(objects, 'n'); + * // => { 'n': 1 } + */function minBy(array,iteratee){return array&&array.length?baseExtremum(array,baseIteratee(iteratee,2),baseLt):undefined}module.exports=minBy},{"./_baseExtremum":83,"./_baseIteratee":105,"./_baseLt":108}],269:[function(require,module,exports){ +/** + * This method returns `undefined`. + * + * @static + * @memberOf _ + * @since 2.3.0 + * @category Util + * @example + * + * _.times(2, _.noop); + * // => [undefined, undefined] + */ +function noop(){ +// No operation performed. +}module.exports=noop},{}],270:[function(require,module,exports){var root=require("./_root"); +/** + * Gets the timestamp of the number of milliseconds that have elapsed since + * the Unix epoch (1 January 1970 00:00:00 UTC). + * + * @static + * @memberOf _ + * @since 2.4.0 + * @category Date + * @returns {number} Returns the timestamp. + * @example + * + * _.defer(function(stamp) { + * console.log(_.now() - stamp); + * }, _.now()); + * // => Logs the number of milliseconds it took for the deferred invocation. + */var now=function(){return root.Date.now()};module.exports=now},{"./_root":208}],271:[function(require,module,exports){var basePick=require("./_basePick"),flatRest=require("./_flatRest"); +/** + * Creates an object composed of the picked `object` properties. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The source object. + * @param {...(string|string[])} [paths] The property paths to pick. + * @returns {Object} Returns the new object. 
+ * @example + * + * var object = { 'a': 1, 'b': '2', 'c': 3 }; + * + * _.pick(object, ['a', 'c']); + * // => { 'a': 1, 'c': 3 } + */var pick=flatRest(function(object,paths){return object==null?{}:basePick(object,paths)});module.exports=pick},{"./_basePick":115,"./_flatRest":157}],272:[function(require,module,exports){var baseProperty=require("./_baseProperty"),basePropertyDeep=require("./_basePropertyDeep"),isKey=require("./_isKey"),toKey=require("./_toKey"); +/** + * Creates a function that returns the value at `path` of a given object. + * + * @static + * @memberOf _ + * @since 2.4.0 + * @category Util + * @param {Array|string} path The path of the property to get. + * @returns {Function} Returns the new accessor function. + * @example + * + * var objects = [ + * { 'a': { 'b': 2 } }, + * { 'a': { 'b': 1 } } + * ]; + * + * _.map(objects, _.property('a.b')); + * // => [2, 1] + * + * _.map(_.sortBy(objects, _.property(['a', 'b'])), 'a.b'); + * // => [1, 2] + */function property(path){return isKey(path)?baseProperty(toKey(path)):basePropertyDeep(path)}module.exports=property},{"./_baseProperty":117,"./_basePropertyDeep":118,"./_isKey":183,"./_toKey":223}],273:[function(require,module,exports){var createRange=require("./_createRange"); +/** + * Creates an array of numbers (positive and/or negative) progressing from + * `start` up to, but not including, `end`. A step of `-1` is used if a negative + * `start` is specified without an `end` or `step`. If `end` is not specified, + * it's set to `start` with `start` then set to `0`. + * + * **Note:** JavaScript follows the IEEE-754 standard for resolving + * floating-point values which can produce unexpected results. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Util + * @param {number} [start=0] The start of the range. + * @param {number} end The end of the range. + * @param {number} [step=1] The value to increment or decrement by. + * @returns {Array} Returns the range of numbers. 
+ * @see _.inRange, _.rangeRight + * @example + * + * _.range(4); + * // => [0, 1, 2, 3] + * + * _.range(-4); + * // => [0, -1, -2, -3] + * + * _.range(1, 5); + * // => [1, 2, 3, 4] + * + * _.range(0, 20, 5); + * // => [0, 5, 10, 15] + * + * _.range(0, -4, -1); + * // => [0, -1, -2, -3] + * + * _.range(1, 4, 0); + * // => [1, 1, 1] + * + * _.range(0); + * // => [] + */var range=createRange();module.exports=range},{"./_createRange":151}],274:[function(require,module,exports){var arrayReduce=require("./_arrayReduce"),baseEach=require("./_baseEach"),baseIteratee=require("./_baseIteratee"),baseReduce=require("./_baseReduce"),isArray=require("./isArray"); +/** + * Reduces `collection` to a value which is the accumulated result of running + * each element in `collection` thru `iteratee`, where each successive + * invocation is supplied the return value of the previous. If `accumulator` + * is not given, the first element of `collection` is used as the initial + * value. The iteratee is invoked with four arguments: + * (accumulator, value, index|key, collection). + * + * Many lodash methods are guarded to work as iteratees for methods like + * `_.reduce`, `_.reduceRight`, and `_.transform`. + * + * The guarded methods are: + * `assign`, `defaults`, `defaultsDeep`, `includes`, `merge`, `orderBy`, + * and `sortBy` + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @param {*} [accumulator] The initial value. + * @returns {*} Returns the accumulated value. 
+ * @see _.reduceRight + * @example + * + * _.reduce([1, 2], function(sum, n) { + * return sum + n; + * }, 0); + * // => 3 + * + * _.reduce({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { + * (result[value] || (result[value] = [])).push(key); + * return result; + * }, {}); + * // => { '1': ['a', 'c'], '2': ['b'] } (iteration order is not guaranteed) + */function reduce(collection,iteratee,accumulator){var func=isArray(collection)?arrayReduce:baseReduce,initAccum=arguments.length<3;return func(collection,baseIteratee(iteratee,4),accumulator,initAccum,baseEach)}module.exports=reduce},{"./_arrayReduce":71,"./_baseEach":82,"./_baseIteratee":105,"./_baseReduce":120,"./isArray":243}],275:[function(require,module,exports){var baseKeys=require("./_baseKeys"),getTag=require("./_getTag"),isArrayLike=require("./isArrayLike"),isString=require("./isString"),stringSize=require("./_stringSize"); +/** `Object#toString` result references. */var mapTag="[object Map]",setTag="[object Set]"; +/** + * Gets the size of `collection` by returning its length for array-like + * values or the number of own enumerable string keyed properties for objects. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object|string} collection The collection to inspect. + * @returns {number} Returns the collection size. 
+ * @example + * + * _.size([1, 2, 3]); + * // => 3 + * + * _.size({ 'a': 1, 'b': 2 }); + * // => 2 + * + * _.size('pebbles'); + * // => 7 + */function size(collection){if(collection==null){return 0}if(isArrayLike(collection)){return isString(collection)?stringSize(collection):collection.length}var tag=getTag(collection);if(tag==mapTag||tag==setTag){return collection.size}return baseKeys(collection).length}module.exports=size},{"./_baseKeys":106,"./_getTag":168,"./_stringSize":221,"./isArrayLike":244,"./isString":255}],276:[function(require,module,exports){var baseFlatten=require("./_baseFlatten"),baseOrderBy=require("./_baseOrderBy"),baseRest=require("./_baseRest"),isIterateeCall=require("./_isIterateeCall"); +/** + * Creates an array of elements, sorted in ascending order by the results of + * running each element in a collection thru each iteratee. This method + * performs a stable sort, that is, it preserves the original sort order of + * equal elements. The iteratees are invoked with one argument: (value). + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Collection + * @param {Array|Object} collection The collection to iterate over. + * @param {...(Function|Function[])} [iteratees=[_.identity]] + * The iteratees to sort by. + * @returns {Array} Returns the new sorted array. 
+ * @example + * + * var users = [ + * { 'user': 'fred', 'age': 48 }, + * { 'user': 'barney', 'age': 36 }, + * { 'user': 'fred', 'age': 40 }, + * { 'user': 'barney', 'age': 34 } + * ]; + * + * _.sortBy(users, [function(o) { return o.user; }]); + * // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]] + * + * _.sortBy(users, ['user', 'age']); + * // => objects for [['barney', 34], ['barney', 36], ['fred', 40], ['fred', 48]] + */var sortBy=baseRest(function(collection,iteratees){if(collection==null){return[]}var length=iteratees.length;if(length>1&&isIterateeCall(collection,iteratees[0],iteratees[1])){iteratees=[]}else if(length>2&&isIterateeCall(iteratees[0],iteratees[1],iteratees[2])){iteratees=[iteratees[0]]}return baseOrderBy(collection,baseFlatten(iteratees,1),[])});module.exports=sortBy},{"./_baseFlatten":86,"./_baseOrderBy":114,"./_baseRest":121,"./_isIterateeCall":182}],277:[function(require,module,exports){ +/** + * This method returns a new empty array. + * + * @static + * @memberOf _ + * @since 4.13.0 + * @category Util + * @returns {Array} Returns the new empty array. + * @example + * + * var arrays = _.times(2, _.stubArray); + * + * console.log(arrays); + * // => [[], []] + * + * console.log(arrays[0] === arrays[1]); + * // => false + */ +function stubArray(){return[]}module.exports=stubArray},{}],278:[function(require,module,exports){ +/** + * This method returns `false`. + * + * @static + * @memberOf _ + * @since 4.13.0 + * @category Util + * @returns {boolean} Returns `false`. + * @example + * + * _.times(2, _.stubFalse); + * // => [false, false] + */ +function stubFalse(){return false}module.exports=stubFalse},{}],279:[function(require,module,exports){var toNumber=require("./toNumber"); +/** Used as references for various `Number` constants. */var INFINITY=1/0,MAX_INTEGER=17976931348623157e292; +/** + * Converts `value` to a finite number. 
+ * + * @static + * @memberOf _ + * @since 4.12.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {number} Returns the converted number. + * @example + * + * _.toFinite(3.2); + * // => 3.2 + * + * _.toFinite(Number.MIN_VALUE); + * // => 5e-324 + * + * _.toFinite(Infinity); + * // => 1.7976931348623157e+308 + * + * _.toFinite('3.2'); + * // => 3.2 + */function toFinite(value){if(!value){return value===0?value:0}value=toNumber(value);if(value===INFINITY||value===-INFINITY){var sign=value<0?-1:1;return sign*MAX_INTEGER}return value===value?value:0}module.exports=toFinite},{"./toNumber":281}],280:[function(require,module,exports){var toFinite=require("./toFinite"); +/** + * Converts `value` to an integer. + * + * **Note:** This method is loosely based on + * [`ToInteger`](http://www.ecma-international.org/ecma-262/7.0/#sec-tointeger). + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {number} Returns the converted integer. + * @example + * + * _.toInteger(3.2); + * // => 3 + * + * _.toInteger(Number.MIN_VALUE); + * // => 0 + * + * _.toInteger(Infinity); + * // => 1.7976931348623157e+308 + * + * _.toInteger('3.2'); + * // => 3 + */function toInteger(value){var result=toFinite(value),remainder=result%1;return result===result?remainder?result-remainder:result:0}module.exports=toInteger},{"./toFinite":279}],281:[function(require,module,exports){var isObject=require("./isObject"),isSymbol=require("./isSymbol"); +/** Used as references for various `Number` constants. */var NAN=0/0; +/** Used to match leading and trailing whitespace. */var reTrim=/^\s+|\s+$/g; +/** Used to detect bad signed hexadecimal string values. */var reIsBadHex=/^[-+]0x[0-9a-f]+$/i; +/** Used to detect binary string values. */var reIsBinary=/^0b[01]+$/i; +/** Used to detect octal string values. */var reIsOctal=/^0o[0-7]+$/i; +/** Built-in method references without a dependency on `root`. 
*/var freeParseInt=parseInt; +/** + * Converts `value` to a number. + * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to process. + * @returns {number} Returns the number. + * @example + * + * _.toNumber(3.2); + * // => 3.2 + * + * _.toNumber(Number.MIN_VALUE); + * // => 5e-324 + * + * _.toNumber(Infinity); + * // => Infinity + * + * _.toNumber('3.2'); + * // => 3.2 + */function toNumber(value){if(typeof value=="number"){return value}if(isSymbol(value)){return NAN}if(isObject(value)){var other=typeof value.valueOf=="function"?value.valueOf():value;value=isObject(other)?other+"":other}if(typeof value!="string"){return value===0?value:+value}value=value.replace(reTrim,"");var isBinary=reIsBinary.test(value);return isBinary||reIsOctal.test(value)?freeParseInt(value.slice(2),isBinary?2:8):reIsBadHex.test(value)?NAN:+value}module.exports=toNumber},{"./isObject":251,"./isSymbol":256}],282:[function(require,module,exports){var copyObject=require("./_copyObject"),keysIn=require("./keysIn"); +/** + * Converts `value` to a plain object flattening inherited enumerable string + * keyed properties of `value` to own properties of the plain object. + * + * @static + * @memberOf _ + * @since 3.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {Object} Returns the converted plain object. + * @example + * + * function Foo() { + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.assign({ 'a': 1 }, new Foo); + * // => { 'a': 1, 'b': 2 } + * + * _.assign({ 'a': 1 }, _.toPlainObject(new Foo)); + * // => { 'a': 1, 'b': 2, 'c': 3 } + */function toPlainObject(value){return copyObject(value,keysIn(value))}module.exports=toPlainObject},{"./_copyObject":143,"./keysIn":260}],283:[function(require,module,exports){var baseToString=require("./_baseToString"); +/** + * Converts `value` to a string. An empty string is returned for `null` + * and `undefined` values. The sign of `-0` is preserved. 
+ * + * @static + * @memberOf _ + * @since 4.0.0 + * @category Lang + * @param {*} value The value to convert. + * @returns {string} Returns the converted string. + * @example + * + * _.toString(null); + * // => '' + * + * _.toString(-0); + * // => '-0' + * + * _.toString([1, 2, 3]); + * // => '1,2,3' + */function toString(value){return value==null?"":baseToString(value)}module.exports=toString},{"./_baseToString":126}],284:[function(require,module,exports){var arrayEach=require("./_arrayEach"),baseCreate=require("./_baseCreate"),baseForOwn=require("./_baseForOwn"),baseIteratee=require("./_baseIteratee"),getPrototype=require("./_getPrototype"),isArray=require("./isArray"),isBuffer=require("./isBuffer"),isFunction=require("./isFunction"),isObject=require("./isObject"),isTypedArray=require("./isTypedArray"); +/** + * An alternative to `_.reduce`; this method transforms `object` to a new + * `accumulator` object which is the result of running each of its own + * enumerable string keyed properties thru `iteratee`, with each invocation + * potentially mutating the `accumulator` object. If `accumulator` is not + * provided, a new object with the same `[[Prototype]]` will be used. The + * iteratee is invoked with four arguments: (accumulator, value, key, object). + * Iteratee functions may exit iteration early by explicitly returning `false`. + * + * @static + * @memberOf _ + * @since 1.3.0 + * @category Object + * @param {Object} object The object to iterate over. + * @param {Function} [iteratee=_.identity] The function invoked per iteration. + * @param {*} [accumulator] The custom accumulator value. + * @returns {*} Returns the accumulated value. 
+ * @example + * + * _.transform([2, 3, 4], function(result, n) { + * result.push(n *= n); + * return n % 2 == 0; + * }, []); + * // => [4, 9] + * + * _.transform({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) { + * (result[value] || (result[value] = [])).push(key); + * }, {}); + * // => { '1': ['a', 'c'], '2': ['b'] } + */function transform(object,iteratee,accumulator){var isArr=isArray(object),isArrLike=isArr||isBuffer(object)||isTypedArray(object);iteratee=baseIteratee(iteratee,4);if(accumulator==null){var Ctor=object&&object.constructor;if(isArrLike){accumulator=isArr?new Ctor:[]}else if(isObject(object)){accumulator=isFunction(Ctor)?baseCreate(getPrototype(object)):{}}else{accumulator={}}}(isArrLike?arrayEach:baseForOwn)(object,function(value,index,object){return iteratee(accumulator,value,index,object)});return accumulator}module.exports=transform},{"./_arrayEach":64,"./_baseCreate":81,"./_baseForOwn":88,"./_baseIteratee":105,"./_getPrototype":164,"./isArray":243,"./isBuffer":246,"./isFunction":248,"./isObject":251,"./isTypedArray":257}],285:[function(require,module,exports){var baseFlatten=require("./_baseFlatten"),baseRest=require("./_baseRest"),baseUniq=require("./_baseUniq"),isArrayLikeObject=require("./isArrayLikeObject"); +/** + * Creates an array of unique values, in order, from all given arrays using + * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero) + * for equality comparisons. + * + * @static + * @memberOf _ + * @since 0.1.0 + * @category Array + * @param {...Array} [arrays] The arrays to inspect. + * @returns {Array} Returns the new array of combined values. 
+ * @example + * + * _.union([2], [1, 2]); + * // => [2, 1] + */var union=baseRest(function(arrays){return baseUniq(baseFlatten(arrays,1,isArrayLikeObject,true))});module.exports=union},{"./_baseFlatten":86,"./_baseRest":121,"./_baseUniq":128,"./isArrayLikeObject":245}],286:[function(require,module,exports){var toString=require("./toString"); +/** Used to generate unique IDs. */var idCounter=0; +/** + * Generates a unique ID. If `prefix` is given, the ID is appended to it. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Util + * @param {string} [prefix=''] The value to prefix the ID with. + * @returns {string} Returns the unique ID. + * @example + * + * _.uniqueId('contact_'); + * // => 'contact_104' + * + * _.uniqueId(); + * // => '105' + */function uniqueId(prefix){var id=++idCounter;return toString(prefix)+id}module.exports=uniqueId},{"./toString":283}],287:[function(require,module,exports){var baseValues=require("./_baseValues"),keys=require("./keys"); +/** + * Creates an array of the own enumerable string keyed property values of `object`. + * + * **Note:** Non-object values are coerced to objects. + * + * @static + * @since 0.1.0 + * @memberOf _ + * @category Object + * @param {Object} object The object to query. + * @returns {Array} Returns the array of property values. + * @example + * + * function Foo() { + * this.a = 1; + * this.b = 2; + * } + * + * Foo.prototype.c = 3; + * + * _.values(new Foo); + * // => [1, 2] (iteration order is not guaranteed) + * + * _.values('hi'); + * // => ['h', 'i'] + */function values(object){return object==null?[]:baseValues(object,keys(object))}module.exports=values},{"./_baseValues":129,"./keys":259}],288:[function(require,module,exports){var assignValue=require("./_assignValue"),baseZipObject=require("./_baseZipObject"); +/** + * This method is like `_.fromPairs` except that it accepts two arrays, + * one of property identifiers and one of corresponding values. 
+ * + * @static + * @memberOf _ + * @since 0.4.0 + * @category Array + * @param {Array} [props=[]] The property identifiers. + * @param {Array} [values=[]] The property values. + * @returns {Object} Returns the new object. + * @example + * + * _.zipObject(['a', 'b'], [1, 2]); + * // => { 'a': 1, 'b': 2 } + */function zipObject(props,values){return baseZipObject(props||[],values||[],assignValue)}module.exports=zipObject},{"./_assignValue":75,"./_baseZipObject":130}]},{},[1])(1)}); diff --git a/frontend/packages/core/public/netron/deps/flatbuffers.js b/frontend/packages/core/public/netron/deps/flatbuffers.js new file mode 100644 index 00000000..74cd35ab --- /dev/null +++ b/frontend/packages/core/public/netron/deps/flatbuffers.js @@ -0,0 +1,1259 @@ +/// @file +/// @addtogroup flatbuffers_javascript_api +/// @{ +/// @cond FLATBUFFERS_INTERNAL + +/** + * @fileoverview + * + * Need to suppress 'global this' error so the Node.js export line doesn't cause + * closure compile to error out. + * @suppress {globalThis} + */ + +/** + * @const + * @namespace + */ +var flatbuffers = {}; + +/** + * @typedef {number} + */ +flatbuffers.Offset; + +/** + * @typedef {{ + * bb: flatbuffers.ByteBuffer, + * bb_pos: number + * }} + */ +flatbuffers.Table; + +/** + * @type {number} + * @const + */ +flatbuffers.SIZEOF_SHORT = 2; + +/** + * @type {number} + * @const + */ +flatbuffers.SIZEOF_INT = 4; + +/** + * @type {number} + * @const + */ +flatbuffers.FILE_IDENTIFIER_LENGTH = 4; + +/** + * @type {number} + * @const + */ +flatbuffers.SIZE_PREFIX_LENGTH = 4; + +/** + * @enum {number} + */ +flatbuffers.Encoding = { + UTF8_BYTES: 1, + UTF16_STRING: 2 +}; + +/** + * @type {Int32Array} + * @const + */ +flatbuffers.int32 = new Int32Array(2); + +/** + * @type {Float32Array} + * @const + */ +flatbuffers.float32 = new Float32Array(flatbuffers.int32.buffer); + +/** + * @type {Float64Array} + * @const + */ +flatbuffers.float64 = new Float64Array(flatbuffers.int32.buffer); + +/** + * @type {boolean} + * 
@const + */ +flatbuffers.isLittleEndian = new Uint16Array(new Uint8Array([1, 0]).buffer)[0] === 1; + +//////////////////////////////////////////////////////////////////////////////// + +/** + * @constructor + * @param {number} low + * @param {number} high + */ +flatbuffers.Long = function(low, high) { + /** + * @type {number} + * @const + */ + this.low = low | 0; + + /** + * @type {number} + * @const + */ + this.high = high | 0; +}; + +/** + * @param {number} low + * @param {number} high + * @returns {!flatbuffers.Long} + */ +flatbuffers.Long.create = function(low, high) { + // Special-case zero to avoid GC overhead for default values + return low == 0 && high == 0 ? flatbuffers.Long.ZERO : new flatbuffers.Long(low, high); +}; + +/** + * @returns {number} + */ +flatbuffers.Long.prototype.toFloat64 = function() { + return (this.low >>> 0) + this.high * 0x100000000; +}; + +/** + * @param {flatbuffers.Long} other + * @returns {boolean} + */ +flatbuffers.Long.prototype.equals = function(other) { + return this.low == other.low && this.high == other.high; +}; + +/** + * @type {!flatbuffers.Long} + * @const + */ +flatbuffers.Long.ZERO = new flatbuffers.Long(0, 0); + +/// @endcond +//////////////////////////////////////////////////////////////////////////////// +/** + * Create a FlatBufferBuilder. + * + * @constructor + * @param {number=} opt_initial_size + */ +flatbuffers.Builder = function(opt_initial_size) { + if (!opt_initial_size) { + var initial_size = 1024; + } else { + var initial_size = opt_initial_size; + } + + /** + * @type {flatbuffers.ByteBuffer} + * @private + */ + this.bb = flatbuffers.ByteBuffer.allocate(initial_size); + + /** + * Remaining space in the ByteBuffer. + * + * @type {number} + * @private + */ + this.space = initial_size; + + /** + * Minimum alignment encountered so far. + * + * @type {number} + * @private + */ + this.minalign = 1; + + /** + * The vtable for the current table. 
+ * + * @type {Array.} + * @private + */ + this.vtable = null; + + /** + * The amount of fields we're actually using. + * + * @type {number} + * @private + */ + this.vtable_in_use = 0; + + /** + * Whether we are currently serializing a table. + * + * @type {boolean} + * @private + */ + this.isNested = false; + + /** + * Starting offset of the current struct/table. + * + * @type {number} + * @private + */ + this.object_start = 0; + + /** + * List of offsets of all vtables. + * + * @type {Array.} + * @private + */ + this.vtables = []; + + /** + * For the current vector being built. + * + * @type {number} + * @private + */ + this.vector_num_elems = 0; + + /** + * False omits default values from the serialized data + * + * @type {boolean} + * @private + */ + this.force_defaults = false; +}; + +flatbuffers.Builder.prototype.clear = function() { + this.bb.clear(); + this.space = this.bb.capacity(); + this.minalign = 1; + this.vtable = null; + this.vtable_in_use = 0; + this.isNested = false; + this.object_start = 0; + this.vtables = []; + this.vector_num_elems = 0; + this.force_defaults = false; +}; + +/** + * In order to save space, fields that are set to their default value + * don't get serialized into the buffer. Forcing defaults provides a + * way to manually disable this optimization. + * + * @param {boolean} forceDefaults true always serializes default values + */ +flatbuffers.Builder.prototype.forceDefaults = function(forceDefaults) { + this.force_defaults = forceDefaults; +}; + +/** + * Get the ByteBuffer representing the FlatBuffer. Only call this after you've + * called finish(). The actual data starts at the ByteBuffer's current position, + * not necessarily at 0. + * + * @returns {flatbuffers.ByteBuffer} + */ +flatbuffers.Builder.prototype.dataBuffer = function() { + return this.bb; +}; + +/** + * Get the bytes representing the FlatBuffer. Only call this after you've + * called finish(). 
+ * + * @returns {!Uint8Array} + */ +flatbuffers.Builder.prototype.asUint8Array = function() { + return this.bb.bytes().subarray(this.bb.position(), this.bb.position() + this.offset()); +}; + +/// @cond FLATBUFFERS_INTERNAL +/** + * Prepare to write an element of `size` after `additional_bytes` have been + * written, e.g. if you write a string, you need to align such the int length + * field is aligned to 4 bytes, and the string data follows it directly. If all + * you need to do is alignment, `additional_bytes` will be 0. + * + * @param {number} size This is the of the new element to write + * @param {number} additional_bytes The padding size + */ +flatbuffers.Builder.prototype.prep = function(size, additional_bytes) { + // Track the biggest thing we've ever aligned to. + if (size > this.minalign) { + this.minalign = size; + } + + // Find the amount of alignment needed such that `size` is properly + // aligned after `additional_bytes` + var align_size = ((~(this.bb.capacity() - this.space + additional_bytes)) + 1) & (size - 1); + + // Reallocate the buffer if needed. 
+ while (this.space < align_size + size + additional_bytes) { + var old_buf_size = this.bb.capacity(); + this.bb = flatbuffers.Builder.growByteBuffer(this.bb); + this.space += this.bb.capacity() - old_buf_size; + } + + this.pad(align_size); +}; + +/** + * @param {number} byte_size + */ +flatbuffers.Builder.prototype.pad = function(byte_size) { + for (var i = 0; i < byte_size; i++) { + this.bb.writeInt8(--this.space, 0); + } +}; + +/** + * @param {number} value + */ +flatbuffers.Builder.prototype.writeInt8 = function(value) { + this.bb.writeInt8(this.space -= 1, value); +}; + +/** + * @param {number} value + */ +flatbuffers.Builder.prototype.writeInt16 = function(value) { + this.bb.writeInt16(this.space -= 2, value); +}; + +/** + * @param {number} value + */ +flatbuffers.Builder.prototype.writeInt32 = function(value) { + this.bb.writeInt32(this.space -= 4, value); +}; + +/** + * @param {flatbuffers.Long} value + */ +flatbuffers.Builder.prototype.writeInt64 = function(value) { + this.bb.writeInt64(this.space -= 8, value); +}; + +/** + * @param {number} value + */ +flatbuffers.Builder.prototype.writeFloat32 = function(value) { + this.bb.writeFloat32(this.space -= 4, value); +}; + +/** + * @param {number} value + */ +flatbuffers.Builder.prototype.writeFloat64 = function(value) { + this.bb.writeFloat64(this.space -= 8, value); +}; +/// @endcond + +/** + * Add an `int8` to the buffer, properly aligned, and grows the buffer (if necessary). + * @param {number} value The `int8` to add the the buffer. + */ +flatbuffers.Builder.prototype.addInt8 = function(value) { + this.prep(1, 0); + this.writeInt8(value); +}; + +/** + * Add an `int16` to the buffer, properly aligned, and grows the buffer (if necessary). + * @param {number} value The `int16` to add the the buffer. + */ +flatbuffers.Builder.prototype.addInt16 = function(value) { + this.prep(2, 0); + this.writeInt16(value); +}; + +/** + * Add an `int32` to the buffer, properly aligned, and grows the buffer (if necessary). 
+ * @param {number} value The `int32` to add the the buffer. + */ +flatbuffers.Builder.prototype.addInt32 = function(value) { + this.prep(4, 0); + this.writeInt32(value); +}; + +/** + * Add an `int64` to the buffer, properly aligned, and grows the buffer (if necessary). + * @param {flatbuffers.Long} value The `int64` to add the the buffer. + */ +flatbuffers.Builder.prototype.addInt64 = function(value) { + this.prep(8, 0); + this.writeInt64(value); +}; + +/** + * Add a `float32` to the buffer, properly aligned, and grows the buffer (if necessary). + * @param {number} value The `float32` to add the the buffer. + */ +flatbuffers.Builder.prototype.addFloat32 = function(value) { + this.prep(4, 0); + this.writeFloat32(value); +}; + +/** + * Add a `float64` to the buffer, properly aligned, and grows the buffer (if necessary). + * @param {number} value The `float64` to add the the buffer. + */ +flatbuffers.Builder.prototype.addFloat64 = function(value) { + this.prep(8, 0); + this.writeFloat64(value); +}; + +/// @cond FLATBUFFERS_INTERNAL +/** + * @param {number} voffset + * @param {number} value + * @param {number} defaultValue + */ +flatbuffers.Builder.prototype.addFieldInt8 = function(voffset, value, defaultValue) { + if (this.force_defaults || value != defaultValue) { + this.addInt8(value); + this.slot(voffset); + } +}; + +/** + * @param {number} voffset + * @param {number} value + * @param {number} defaultValue + */ +flatbuffers.Builder.prototype.addFieldInt16 = function(voffset, value, defaultValue) { + if (this.force_defaults || value != defaultValue) { + this.addInt16(value); + this.slot(voffset); + } +}; + +/** + * @param {number} voffset + * @param {number} value + * @param {number} defaultValue + */ +flatbuffers.Builder.prototype.addFieldInt32 = function(voffset, value, defaultValue) { + if (this.force_defaults || value != defaultValue) { + this.addInt32(value); + this.slot(voffset); + } +}; + +/** + * @param {number} voffset + * @param {flatbuffers.Long} value + 
* @param {flatbuffers.Long} defaultValue + */ +flatbuffers.Builder.prototype.addFieldInt64 = function(voffset, value, defaultValue) { + if (this.force_defaults || !value.equals(defaultValue)) { + this.addInt64(value); + this.slot(voffset); + } +}; + +/** + * @param {number} voffset + * @param {number} value + * @param {number} defaultValue + */ +flatbuffers.Builder.prototype.addFieldFloat32 = function(voffset, value, defaultValue) { + if (this.force_defaults || value != defaultValue) { + this.addFloat32(value); + this.slot(voffset); + } +}; + +/** + * @param {number} voffset + * @param {number} value + * @param {number} defaultValue + */ +flatbuffers.Builder.prototype.addFieldFloat64 = function(voffset, value, defaultValue) { + if (this.force_defaults || value != defaultValue) { + this.addFloat64(value); + this.slot(voffset); + } +}; + +/** + * @param {number} voffset + * @param {flatbuffers.Offset} value + * @param {flatbuffers.Offset} defaultValue + */ +flatbuffers.Builder.prototype.addFieldOffset = function(voffset, value, defaultValue) { + if (this.force_defaults || value != defaultValue) { + this.addOffset(value); + this.slot(voffset); + } +}; + +/** + * Structs are stored inline, so nothing additional is being added. `d` is always 0. + * + * @param {number} voffset + * @param {flatbuffers.Offset} value + * @param {flatbuffers.Offset} defaultValue + */ +flatbuffers.Builder.prototype.addFieldStruct = function(voffset, value, defaultValue) { + if (value != defaultValue) { + this.nested(value); + this.slot(voffset); + } +}; + +/** + * Structures are always stored inline, they need to be created right + * where they're used. You'll get this assertion failure if you + * created it elsewhere. 
+ * + * @param {flatbuffers.Offset} obj The offset of the created object + */ +flatbuffers.Builder.prototype.nested = function(obj) { + if (obj != this.offset()) { + throw new Error('FlatBuffers: struct must be serialized inline.'); + } +}; + +/** + * Should not be creating any other object, string or vector + * while an object is being constructed + */ +flatbuffers.Builder.prototype.notNested = function() { + if (this.isNested) { + throw new Error('FlatBuffers: object serialization must not be nested.'); + } +}; + +/** + * Set the current vtable at `voffset` to the current location in the buffer. + * + * @param {number} voffset + */ +flatbuffers.Builder.prototype.slot = function(voffset) { + this.vtable[voffset] = this.offset(); +}; + +/** + * @returns {flatbuffers.Offset} Offset relative to the end of the buffer. + */ +flatbuffers.Builder.prototype.offset = function() { + return this.bb.capacity() - this.space; +}; + +/** + * Doubles the size of the backing ByteBuffer and copies the old data towards + * the end of the new buffer (since we build the buffer backwards). + * + * @param {flatbuffers.ByteBuffer} bb The current buffer with the existing data + * @returns {!flatbuffers.ByteBuffer} A new byte buffer with the old data copied + * to it. The data is located at the end of the buffer. + * + * uint8Array.set() formally takes {Array|ArrayBufferView}, so to pass + * it a uint8Array we need to suppress the type check: + * @suppress {checkTypes} + */ +flatbuffers.Builder.growByteBuffer = function(bb) { + var old_buf_size = bb.capacity(); + + // Ensure we don't grow beyond what fits in an int. 
+ if (old_buf_size & 0xC0000000) { + throw new Error('FlatBuffers: cannot grow buffer beyond 2 gigabytes.'); + } + + var new_buf_size = old_buf_size << 1; + var nbb = flatbuffers.ByteBuffer.allocate(new_buf_size); + nbb.setPosition(new_buf_size - old_buf_size); + nbb.bytes().set(bb.bytes(), new_buf_size - old_buf_size); + return nbb; +}; +/// @endcond + +/** + * Adds on offset, relative to where it will be written. + * + * @param {flatbuffers.Offset} offset The offset to add. + */ +flatbuffers.Builder.prototype.addOffset = function(offset) { + this.prep(flatbuffers.SIZEOF_INT, 0); // Ensure alignment is already done. + this.writeInt32(this.offset() - offset + flatbuffers.SIZEOF_INT); +}; + +/// @cond FLATBUFFERS_INTERNAL +/** + * Start encoding a new object in the buffer. Users will not usually need to + * call this directly. The FlatBuffers compiler will generate helper methods + * that call this method internally. + * + * @param {number} numfields + */ +flatbuffers.Builder.prototype.startObject = function(numfields) { + this.notNested(); + if (this.vtable == null) { + this.vtable = []; + } + this.vtable_in_use = numfields; + for (var i = 0; i < numfields; i++) { + this.vtable[i] = 0; // This will push additional elements as needed + } + this.isNested = true; + this.object_start = this.offset(); +}; + +/** + * Finish off writing the object that is under construction. + * + * @returns {flatbuffers.Offset} The offset to the object inside `dataBuffer` + */ +flatbuffers.Builder.prototype.endObject = function() { + if (this.vtable == null || !this.isNested) { + throw new Error('FlatBuffers: endObject called without startObject'); + } + + this.addInt32(0); + var vtableloc = this.offset(); + + // Trim trailing zeroes. + var i = this.vtable_in_use - 1; + for (; i >= 0 && this.vtable[i] == 0; i--) {} + var trimmed_size = i + 1; + + // Write out the current vtable. + for (; i >= 0; i--) { + // Offset relative to the start of the table. + this.addInt16(this.vtable[i] != 0 ? 
vtableloc - this.vtable[i] : 0); + } + + var standard_fields = 2; // The fields below: + this.addInt16(vtableloc - this.object_start); + var len = (trimmed_size + standard_fields) * flatbuffers.SIZEOF_SHORT; + this.addInt16(len); + + // Search for an existing vtable that matches the current one. + var existing_vtable = 0; + var vt1 = this.space; +outer_loop: + for (i = 0; i < this.vtables.length; i++) { + var vt2 = this.bb.capacity() - this.vtables[i]; + if (len == this.bb.readInt16(vt2)) { + for (var j = flatbuffers.SIZEOF_SHORT; j < len; j += flatbuffers.SIZEOF_SHORT) { + if (this.bb.readInt16(vt1 + j) != this.bb.readInt16(vt2 + j)) { + continue outer_loop; + } + } + existing_vtable = this.vtables[i]; + break; + } + } + + if (existing_vtable) { + // Found a match: + // Remove the current vtable. + this.space = this.bb.capacity() - vtableloc; + + // Point table to existing vtable. + this.bb.writeInt32(this.space, existing_vtable - vtableloc); + } else { + // No match: + // Add the location of the current vtable to the list of vtables. + this.vtables.push(this.offset()); + + // Point table to current vtable. + this.bb.writeInt32(this.bb.capacity() - vtableloc, this.offset() - vtableloc); + } + + this.isNested = false; + return vtableloc; +}; +/// @endcond + +/** + * Finalize a buffer, poiting to the given `root_table`. + * + * @param {flatbuffers.Offset} root_table + * @param {string=} opt_file_identifier + * @param {boolean=} opt_size_prefix + */ +flatbuffers.Builder.prototype.finish = function(root_table, opt_file_identifier, opt_size_prefix) { + var size_prefix = opt_size_prefix ? 
flatbuffers.SIZE_PREFIX_LENGTH : 0; + if (opt_file_identifier) { + var file_identifier = opt_file_identifier; + this.prep(this.minalign, flatbuffers.SIZEOF_INT + + flatbuffers.FILE_IDENTIFIER_LENGTH + size_prefix); + if (file_identifier.length != flatbuffers.FILE_IDENTIFIER_LENGTH) { + throw new Error('FlatBuffers: file identifier must be length ' + + flatbuffers.FILE_IDENTIFIER_LENGTH); + } + for (var i = flatbuffers.FILE_IDENTIFIER_LENGTH - 1; i >= 0; i--) { + this.writeInt8(file_identifier.charCodeAt(i)); + } + } + this.prep(this.minalign, flatbuffers.SIZEOF_INT + size_prefix); + this.addOffset(root_table); + if (size_prefix) { + this.addInt32(this.bb.capacity() - this.space); + } + this.bb.setPosition(this.space); +}; + +/** + * Finalize a size prefixed buffer, pointing to the given `root_table`. + * + * @param {flatbuffers.Offset} root_table + * @param {string=} opt_file_identifier + */ +flatbuffers.Builder.prototype.finishSizePrefixed = function (root_table, opt_file_identifier) { + this.finish(root_table, opt_file_identifier, true); +}; + +/// @cond FLATBUFFERS_INTERNAL +/** + * This checks a required field has been set in a given table that has + * just been constructed. + * + * @param {flatbuffers.Offset} table + * @param {number} field + */ +flatbuffers.Builder.prototype.requiredField = function(table, field) { + var table_start = this.bb.capacity() - table; + var vtable_start = table_start - this.bb.readInt32(table_start); + var ok = this.bb.readInt16(vtable_start + field) != 0; + + // If this fails, the caller will show what field needs to be set. + if (!ok) { + throw new Error('FlatBuffers: field ' + field + ' must be set'); + } +}; + +/** + * Start a new array/vector of objects. Users usually will not call + * this directly. The FlatBuffers compiler will create a start/end + * method for vector types in generated code. 
+ * + * @param {number} elem_size The size of each element in the array + * @param {number} num_elems The number of elements in the array + * @param {number} alignment The alignment of the array + */ +flatbuffers.Builder.prototype.startVector = function(elem_size, num_elems, alignment) { + this.notNested(); + this.vector_num_elems = num_elems; + this.prep(flatbuffers.SIZEOF_INT, elem_size * num_elems); + this.prep(alignment, elem_size * num_elems); // Just in case alignment > int. +}; + +/** + * Finish off the creation of an array and all its elements. The array must be + * created with `startVector`. + * + * @returns {flatbuffers.Offset} The offset at which the newly created array + * starts. + */ +flatbuffers.Builder.prototype.endVector = function() { + this.writeInt32(this.vector_num_elems); + return this.offset(); +}; +/// @endcond + +/** + * Encode the string `s` in the buffer using UTF-8. If a Uint8Array is passed + * instead of a string, it is assumed to contain valid UTF-8 encoded data. 
+ * + * @param {string|Uint8Array} s The string to encode + * @return {flatbuffers.Offset} The offset in the buffer where the encoded string starts + */ +flatbuffers.Builder.prototype.createString = function(s) { + if (s instanceof Uint8Array) { + var utf8 = s; + } else { + var utf8 = []; + var i = 0; + + while (i < s.length) { + var codePoint; + + // Decode UTF-16 + var a = s.charCodeAt(i++); + if (a < 0xD800 || a >= 0xDC00) { + codePoint = a; + } else { + var b = s.charCodeAt(i++); + codePoint = (a << 10) + b + (0x10000 - (0xD800 << 10) - 0xDC00); + } + + // Encode UTF-8 + if (codePoint < 0x80) { + utf8.push(codePoint); + } else { + if (codePoint < 0x800) { + utf8.push(((codePoint >> 6) & 0x1F) | 0xC0); + } else { + if (codePoint < 0x10000) { + utf8.push(((codePoint >> 12) & 0x0F) | 0xE0); + } else { + utf8.push( + ((codePoint >> 18) & 0x07) | 0xF0, + ((codePoint >> 12) & 0x3F) | 0x80); + } + utf8.push(((codePoint >> 6) & 0x3F) | 0x80); + } + utf8.push((codePoint & 0x3F) | 0x80); + } + } + } + + this.addInt8(0); + this.startVector(1, utf8.length, 1); + this.bb.setPosition(this.space -= utf8.length); + for (var i = 0, offset = this.space, bytes = this.bb.bytes(); i < utf8.length; i++) { + bytes[offset++] = utf8[i]; + } + return this.endVector(); +}; + +/** + * A helper function to avoid generated code depending on this file directly. + * + * @param {number} low + * @param {number} high + * @returns {!flatbuffers.Long} + */ +flatbuffers.Builder.prototype.createLong = function(low, high) { + return flatbuffers.Long.create(low, high); +}; +//////////////////////////////////////////////////////////////////////////////// +/// @cond FLATBUFFERS_INTERNAL +/** + * Create a new ByteBuffer with a given array of bytes (`Uint8Array`). 
+ * + * @constructor + * @param {Uint8Array} bytes + */ +flatbuffers.ByteBuffer = function(bytes) { + /** + * @type {Uint8Array} + * @private + */ + this.bytes_ = bytes; + + /** + * @type {number} + * @private + */ + this.position_ = 0; +}; + +/** + * Create and allocate a new ByteBuffer with a given size. + * + * @param {number} byte_size + * @returns {!flatbuffers.ByteBuffer} + */ +flatbuffers.ByteBuffer.allocate = function(byte_size) { + return new flatbuffers.ByteBuffer(new Uint8Array(byte_size)); +}; + +flatbuffers.ByteBuffer.prototype.clear = function() { + this.position_ = 0; +}; + +/** + * Get the underlying `Uint8Array`. + * + * @returns {Uint8Array} + */ +flatbuffers.ByteBuffer.prototype.bytes = function() { + return this.bytes_; +}; + +/** + * Get the buffer's position. + * + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.position = function() { + return this.position_; +}; + +/** + * Set the buffer's position. + * + * @param {number} position + */ +flatbuffers.ByteBuffer.prototype.setPosition = function(position) { + this.position_ = position; +}; + +/** + * Get the buffer's capacity. 
+ * + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.capacity = function() { + return this.bytes_.length; +}; + +/** + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.readInt8 = function(offset) { + return this.readUint8(offset) << 24 >> 24; +}; + +/** + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.readUint8 = function(offset) { + return this.bytes_[offset]; +}; + +/** + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.readInt16 = function(offset) { + return this.readUint16(offset) << 16 >> 16; +}; + +/** + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.readUint16 = function(offset) { + return this.bytes_[offset] | this.bytes_[offset + 1] << 8; +}; + +/** + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.readInt32 = function(offset) { + return this.bytes_[offset] | this.bytes_[offset + 1] << 8 | this.bytes_[offset + 2] << 16 | this.bytes_[offset + 3] << 24; +}; + +/** + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.readUint32 = function(offset) { + return this.readInt32(offset) >>> 0; +}; + +/** + * @param {number} offset + * @returns {!flatbuffers.Long} + */ +flatbuffers.ByteBuffer.prototype.readInt64 = function(offset) { + return new flatbuffers.Long(this.readInt32(offset), this.readInt32(offset + 4)); +}; + +/** + * @param {number} offset + * @returns {!flatbuffers.Long} + */ +flatbuffers.ByteBuffer.prototype.readUint64 = function(offset) { + return new flatbuffers.Long(this.readUint32(offset), this.readUint32(offset + 4)); +}; + +/** + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.readFloat32 = function(offset) { + flatbuffers.int32[0] = this.readInt32(offset); + return flatbuffers.float32[0]; +}; + +/** + * @param {number} offset + * @returns {number} + */ 
+flatbuffers.ByteBuffer.prototype.readFloat64 = function(offset) { + flatbuffers.int32[flatbuffers.isLittleEndian ? 0 : 1] = this.readInt32(offset); + flatbuffers.int32[flatbuffers.isLittleEndian ? 1 : 0] = this.readInt32(offset + 4); + return flatbuffers.float64[0]; +}; + +/** + * @param {number} offset + * @param {number|boolean} value + */ +flatbuffers.ByteBuffer.prototype.writeInt8 = function(offset, value) { + this.bytes_[offset] = /** @type {number} */(value); +}; + +/** + * @param {number} offset + * @param {number} value + */ +flatbuffers.ByteBuffer.prototype.writeUint8 = function(offset, value) { + this.bytes_[offset] = value; +}; + +/** + * @param {number} offset + * @param {number} value + */ +flatbuffers.ByteBuffer.prototype.writeInt16 = function(offset, value) { + this.bytes_[offset] = value; + this.bytes_[offset + 1] = value >> 8; +}; + +/** + * @param {number} offset + * @param {number} value + */ +flatbuffers.ByteBuffer.prototype.writeUint16 = function(offset, value) { + this.bytes_[offset] = value; + this.bytes_[offset + 1] = value >> 8; +}; + +/** + * @param {number} offset + * @param {number} value + */ +flatbuffers.ByteBuffer.prototype.writeInt32 = function(offset, value) { + this.bytes_[offset] = value; + this.bytes_[offset + 1] = value >> 8; + this.bytes_[offset + 2] = value >> 16; + this.bytes_[offset + 3] = value >> 24; +}; + +/** + * @param {number} offset + * @param {number} value + */ +flatbuffers.ByteBuffer.prototype.writeUint32 = function(offset, value) { + this.bytes_[offset] = value; + this.bytes_[offset + 1] = value >> 8; + this.bytes_[offset + 2] = value >> 16; + this.bytes_[offset + 3] = value >> 24; +}; + +/** + * @param {number} offset + * @param {flatbuffers.Long} value + */ +flatbuffers.ByteBuffer.prototype.writeInt64 = function(offset, value) { + this.writeInt32(offset, value.low); + this.writeInt32(offset + 4, value.high); +}; + +/** + * @param {number} offset + * @param {flatbuffers.Long} value + */ 
+flatbuffers.ByteBuffer.prototype.writeUint64 = function(offset, value) { + this.writeUint32(offset, value.low); + this.writeUint32(offset + 4, value.high); +}; + +/** + * @param {number} offset + * @param {number} value + */ +flatbuffers.ByteBuffer.prototype.writeFloat32 = function(offset, value) { + flatbuffers.float32[0] = value; + this.writeInt32(offset, flatbuffers.int32[0]); +}; + +/** + * @param {number} offset + * @param {number} value + */ +flatbuffers.ByteBuffer.prototype.writeFloat64 = function(offset, value) { + flatbuffers.float64[0] = value; + this.writeInt32(offset, flatbuffers.int32[flatbuffers.isLittleEndian ? 0 : 1]); + this.writeInt32(offset + 4, flatbuffers.int32[flatbuffers.isLittleEndian ? 1 : 0]); +}; + +/** + * Return the file identifier. Behavior is undefined for FlatBuffers whose + * schema does not include a file_identifier (likely points at padding or the + * start of a the root vtable). + * @returns {string} + */ +flatbuffers.ByteBuffer.prototype.getBufferIdentifier = function() { + if (this.bytes_.length < this.position_ + flatbuffers.SIZEOF_INT + + flatbuffers.FILE_IDENTIFIER_LENGTH) { + throw new Error( + 'FlatBuffers: ByteBuffer is too short to contain an identifier.'); + } + var result = ""; + for (var i = 0; i < flatbuffers.FILE_IDENTIFIER_LENGTH; i++) { + result += String.fromCharCode( + this.readInt8(this.position_ + flatbuffers.SIZEOF_INT + i)); + } + return result; +}; + +/** + * Look up a field in the vtable, return an offset into the object, or 0 if the + * field is not present. + * + * @param {number} bb_pos + * @param {number} vtable_offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.__offset = function(bb_pos, vtable_offset) { + var vtable = bb_pos - this.readInt32(bb_pos); + return vtable_offset < this.readInt16(vtable) ? this.readInt16(vtable + vtable_offset) : 0; +}; + +/** + * Initialize any Table-derived type to point to the union at the given offset. 
+ * + * @param {flatbuffers.Table} t + * @param {number} offset + * @returns {flatbuffers.Table} + */ +flatbuffers.ByteBuffer.prototype.__union = function(t, offset) { + t.bb_pos = offset + this.readInt32(offset); + t.bb = this; + return t; +}; + +/** + * Create a JavaScript string from UTF-8 data stored inside the FlatBuffer. + * This allocates a new string and converts to wide chars upon each access. + * + * To avoid the conversion to UTF-16, pass flatbuffers.Encoding.UTF8_BYTES as + * the "optionalEncoding" argument. This is useful for avoiding conversion to + * and from UTF-16 when the data will just be packaged back up in another + * FlatBuffer later on. + * + * @param {number} offset + * @param {flatbuffers.Encoding=} opt_encoding Defaults to UTF16_STRING + * @returns {string|!Uint8Array} + */ +flatbuffers.ByteBuffer.prototype.__string = function(offset, opt_encoding) { + offset += this.readInt32(offset); + + var length = this.readInt32(offset); + var result = ''; + var i = 0; + + offset += flatbuffers.SIZEOF_INT; + + if (opt_encoding === flatbuffers.Encoding.UTF8_BYTES) { + return this.bytes_.subarray(offset, offset + length); + } + + while (i < length) { + var codePoint; + + // Decode UTF-8 + var a = this.readUint8(offset + i++); + if (a < 0xC0) { + codePoint = a; + } else { + var b = this.readUint8(offset + i++); + if (a < 0xE0) { + codePoint = + ((a & 0x1F) << 6) | + (b & 0x3F); + } else { + var c = this.readUint8(offset + i++); + if (a < 0xF0) { + codePoint = + ((a & 0x0F) << 12) | + ((b & 0x3F) << 6) | + (c & 0x3F); + } else { + var d = this.readUint8(offset + i++); + codePoint = + ((a & 0x07) << 18) | + ((b & 0x3F) << 12) | + ((c & 0x3F) << 6) | + (d & 0x3F); + } + } + } + + // Encode UTF-16 + if (codePoint < 0x10000) { + result += String.fromCharCode(codePoint); + } else { + codePoint -= 0x10000; + result += String.fromCharCode( + (codePoint >> 10) + 0xD800, + (codePoint & ((1 << 10) - 1)) + 0xDC00); + } + } + + return result; +}; + +/** + * Retrieve 
the relative offset stored at "offset" + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.__indirect = function(offset) { + return offset + this.readInt32(offset); +}; + +/** + * Get the start of data of a vector whose offset is stored at "offset" in this object. + * + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.__vector = function(offset) { + return offset + this.readInt32(offset) + flatbuffers.SIZEOF_INT; // data starts after the length +}; + +/** + * Get the length of a vector whose offset is stored at "offset" in this object. + * + * @param {number} offset + * @returns {number} + */ +flatbuffers.ByteBuffer.prototype.__vector_len = function(offset) { + return this.readInt32(offset + this.readInt32(offset)); +}; + +/** + * @param {string} ident + * @returns {boolean} + */ +flatbuffers.ByteBuffer.prototype.__has_identifier = function(ident) { + if (ident.length != flatbuffers.FILE_IDENTIFIER_LENGTH) { + throw new Error('FlatBuffers: file identifier must be length ' + + flatbuffers.FILE_IDENTIFIER_LENGTH); + } + for (var i = 0; i < flatbuffers.FILE_IDENTIFIER_LENGTH; i++) { + if (ident.charCodeAt(i) != this.readInt8(this.position_ + flatbuffers.SIZEOF_INT + i)) { + return false; + } + } + return true; +}; + +/** + * A helper function to avoid generated code depending on this file directly. 
+ * + * @param {number} low + * @param {number} high + * @returns {!flatbuffers.Long} + */ +flatbuffers.ByteBuffer.prototype.createLong = function(low, high) { + return flatbuffers.Long.create(low, high); +}; + +// Exports for Node.js and RequireJS +this.flatbuffers = flatbuffers; + +/// @endcond +/// @} diff --git a/frontend/packages/core/public/netron/deps/long.js b/frontend/packages/core/public/netron/deps/long.js new file mode 100644 index 00000000..93458930 --- /dev/null +++ b/frontend/packages/core/public/netron/deps/long.js @@ -0,0 +1,2 @@ +!function(t,i){"object"==typeof exports&&"object"==typeof module?module.exports=i():"function"==typeof define&&define.amd?define([],i):"object"==typeof exports?exports.Long=i():t.Long=i()}("undefined"!=typeof self?self:this,function(){return function(t){function i(e){if(n[e])return n[e].exports;var r=n[e]={i:e,l:!1,exports:{}};return t[e].call(r.exports,r,r.exports,i),r.l=!0,r.exports}var n={};return i.m=t,i.c=n,i.d=function(t,n,e){i.o(t,n)||Object.defineProperty(t,n,{configurable:!1,enumerable:!0,get:e})},i.n=function(t){var n=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(n,"a",n),n},i.o=function(t,i){return Object.prototype.hasOwnProperty.call(t,i)},i.p="",i(i.s=0)}([function(t,i){function n(t,i,n){this.low=0|t,this.high=0|i,this.unsigned=!!n}function e(t){return!0===(t&&t.__isLong__)}function r(t,i){var n,e,r;return i?(t>>>=0,(r=0<=t&&t<256)&&(e=l[t])?e:(n=h(t,(0|t)<0?-1:0,!0),r&&(l[t]=n),n)):(t|=0,(r=-128<=t&&t<128)&&(e=f[t])?e:(n=h(t,t<0?-1:0,!1),r&&(f[t]=n),n))}function s(t,i){if(isNaN(t))return i?p:m;if(i){if(t<0)return p;if(t>=c)return q}else{if(t<=-v)return _;if(t+1>=v)return E}return t<0?s(-t,i).neg():h(t%d|0,t/d|0,i)}function h(t,i,e){return new n(t,i,e)}function u(t,i,n){if(0===t.length)throw Error("empty string");if("NaN"===t||"Infinity"===t||"+Infinity"===t||"-Infinity"===t)return m;if("number"==typeof i?(n=i,i=!1):i=!!i,(n=n||10)<2||360)throw Error("interior 
hyphen");if(0===e)return u(t.substring(1),i,n).neg();for(var r=s(a(n,8)),h=m,o=0;o>>0:this.low},B.toNumber=function(){return this.unsigned?(this.high>>>0)*d+(this.low>>>0):this.high*d+(this.low>>>0)},B.toString=function(t){if((t=t||10)<2||36>>0,f=g.toString(t);if(h=o,h.isZero())return f+u;for(;f.length<6;)f="0"+f;u=""+f+u}},B.getHighBits=function(){return this.high},B.getHighBitsUnsigned=function(){return this.high>>>0},B.getLowBits=function(){return this.low},B.getLowBitsUnsigned=function(){return this.low>>>0},B.getNumBitsAbs=function(){if(this.isNegative())return this.eq(_)?64:this.neg().getNumBitsAbs();for(var t=0!=this.high?this.high:this.low,i=31;i>0&&0==(t&1<=0},B.isOdd=function(){return 1==(1&this.low)},B.isEven=function(){return 0==(1&this.low)},B.equals=function(t){return e(t)||(t=o(t)),(this.unsigned===t.unsigned||this.high>>>31!=1||t.high>>>31!=1)&&(this.high===t.high&&this.low===t.low)},B.eq=B.equals,B.notEquals=function(t){return!this.eq(t)},B.neq=B.notEquals,B.ne=B.notEquals,B.lessThan=function(t){return this.comp(t)<0},B.lt=B.lessThan,B.lessThanOrEqual=function(t){return this.comp(t)<=0},B.lte=B.lessThanOrEqual,B.le=B.lessThanOrEqual,B.greaterThan=function(t){return this.comp(t)>0},B.gt=B.greaterThan,B.greaterThanOrEqual=function(t){return this.comp(t)>=0},B.gte=B.greaterThanOrEqual,B.ge=B.greaterThanOrEqual,B.compare=function(t){if(e(t)||(t=o(t)),this.eq(t))return 0;var i=this.isNegative(),n=t.isNegative();return i&&!n?-1:!i&&n?1:this.unsigned?t.high>>>0>this.high>>>0||t.high===this.high&&t.low>>>0>this.low>>>0?-1:1:this.sub(t).isNegative()?-1:1},B.comp=B.compare,B.negate=function(){return!this.unsigned&&this.eq(_)?_:this.not().add(y)},B.neg=B.negate,B.add=function(t){e(t)||(t=o(t));var i=this.high>>>16,n=65535&this.high,r=this.low>>>16,s=65535&this.low,u=t.high>>>16,g=65535&t.high,f=t.low>>>16,l=65535&t.low,a=0,d=0,c=0,v=0;return 
v+=s+l,c+=v>>>16,v&=65535,c+=r+f,d+=c>>>16,c&=65535,d+=n+g,a+=d>>>16,d&=65535,a+=i+u,a&=65535,h(c<<16|v,a<<16|d,this.unsigned)},B.subtract=function(t){return e(t)||(t=o(t)),this.add(t.neg())},B.sub=B.subtract,B.multiply=function(t){if(this.isZero())return m;if(e(t)||(t=o(t)),g){return h(g.mul(this.low,this.high,t.low,t.high),g.get_high(),this.unsigned)}if(t.isZero())return m;if(this.eq(_))return t.isOdd()?_:m;if(t.eq(_))return this.isOdd()?_:m;if(this.isNegative())return t.isNegative()?this.neg().mul(t.neg()):this.neg().mul(t).neg();if(t.isNegative())return this.mul(t.neg()).neg();if(this.lt(w)&&t.lt(w))return s(this.toNumber()*t.toNumber(),this.unsigned);var i=this.high>>>16,n=65535&this.high,r=this.low>>>16,u=65535&this.low,f=t.high>>>16,l=65535&t.high,a=t.low>>>16,d=65535&t.low,c=0,v=0,p=0,y=0;return y+=u*d,p+=y>>>16,y&=65535,p+=r*d,v+=p>>>16,p&=65535,p+=u*a,v+=p>>>16,p&=65535,v+=n*d,c+=v>>>16,v&=65535,v+=r*a,c+=v>>>16,v&=65535,v+=u*l,c+=v>>>16,v&=65535,c+=i*d+n*a+r*l+u*f,c&=65535,h(p<<16|y,c<<16|v,this.unsigned)},B.mul=B.multiply,B.divide=function(t){if(e(t)||(t=o(t)),t.isZero())throw Error("division by zero");if(g){if(!this.unsigned&&-2147483648===this.high&&-1===t.low&&-1===t.high)return this;return h((this.unsigned?g.div_u:g.div_s)(this.low,this.high,t.low,t.high),g.get_high(),this.unsigned)}if(this.isZero())return this.unsigned?p:m;var i,n,r;if(this.unsigned){if(t.unsigned||(t=t.toUnsigned()),t.gt(this))return p;if(t.gt(this.shru(1)))return b;r=p}else{if(this.eq(_)){if(t.eq(y)||t.eq(N))return _;if(t.eq(_))return y;return i=this.shr(1).div(t).shl(1),i.eq(m)?t.isNegative()?y:N:(n=this.sub(t.mul(i)),r=i.add(n.div(t)))}if(t.eq(_))return this.unsigned?p:m;if(this.isNegative())return t.isNegative()?this.neg().div(t.neg()):this.neg().div(t).neg();if(t.isNegative())return this.div(t.neg()).neg();r=m}for(n=this;n.gte(t);){i=Math.max(1,Math.floor(n.toNumber()/t.toNumber()));for(var 
u=Math.ceil(Math.log(i)/Math.LN2),f=u<=48?1:a(2,u-48),l=s(i),d=l.mul(t);d.isNegative()||d.gt(n);)i-=f,l=s(i,this.unsigned),d=l.mul(t);l.isZero()&&(l=y),r=r.add(l),n=n.sub(d)}return r},B.div=B.divide,B.modulo=function(t){if(e(t)||(t=o(t)),g){return h((this.unsigned?g.rem_u:g.rem_s)(this.low,this.high,t.low,t.high),g.get_high(),this.unsigned)}return this.sub(this.div(t).mul(t))},B.mod=B.modulo,B.rem=B.modulo,B.not=function(){return h(~this.low,~this.high,this.unsigned)},B.and=function(t){return e(t)||(t=o(t)),h(this.low&t.low,this.high&t.high,this.unsigned)},B.or=function(t){return e(t)||(t=o(t)),h(this.low|t.low,this.high|t.high,this.unsigned)},B.xor=function(t){return e(t)||(t=o(t)),h(this.low^t.low,this.high^t.high,this.unsigned)},B.shiftLeft=function(t){return e(t)&&(t=t.toInt()),0==(t&=63)?this:t<32?h(this.low<>>32-t,this.unsigned):h(0,this.low<>>t|this.high<<32-t,this.high>>t,this.unsigned):h(this.high>>t-32,this.high>=0?0:-1,this.unsigned)},B.shr=B.shiftRight,B.shiftRightUnsigned=function(t){if(e(t)&&(t=t.toInt()),0===(t&=63))return this;var i=this.high;if(t<32){return h(this.low>>>t|i<<32-t,i>>>t,this.unsigned)}return 32===t?h(i,0,this.unsigned):h(i>>>t-32,0,this.unsigned)},B.shru=B.shiftRightUnsigned,B.shr_u=B.shiftRightUnsigned,B.toSigned=function(){return this.unsigned?h(this.low,this.high,!1):this},B.toUnsigned=function(){return this.unsigned?this:h(this.low,this.high,!0)},B.toBytes=function(t){return t?this.toBytesLE():this.toBytesBE()},B.toBytesLE=function(){var t=this.high,i=this.low;return[255&i,i>>>8&255,i>>>16&255,i>>>24,255&t,t>>>8&255,t>>>16&255,t>>>24]},B.toBytesBE=function(){var t=this.high,i=this.low;return[t>>>24,t>>>16&255,t>>>8&255,255&t,i>>>24,i>>>16&255,i>>>8&255,255&i]},n.fromBytes=function(t,i,e){return e?n.fromBytesLE(t,i):n.fromBytesBE(t,i)},n.fromBytesLE=function(t,i){return new n(t[0]|t[1]<<8|t[2]<<16|t[3]<<24,t[4]|t[5]<<8|t[6]<<16|t[7]<<24,i)},n.fromBytesBE=function(t,i){return new 
n(t[4]<<24|t[5]<<16|t[6]<<8|t[7],t[0]<<24|t[1]<<16|t[2]<<8|t[3],i)}}])}); +//# sourceMappingURL=long.js.map \ No newline at end of file diff --git a/frontend/packages/core/public/netron/deps/marked.min.js b/frontend/packages/core/public/netron/deps/marked.min.js new file mode 100644 index 00000000..b9d0f20e --- /dev/null +++ b/frontend/packages/core/public/netron/deps/marked.min.js @@ -0,0 +1,6 @@ +/** + * marked - a markdown parser + * Copyright (c) 2011-2020, Christopher Jeffrey. (MIT Licensed) + * https://github.com/markedjs/marked + */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e=e||self).marked=t()}(this,function(){"use strict";function s(e,t){for(var n=0;ne.length)&&(t=e.length);for(var n=0,r=new Array(t);n=e.length?{done:!0}:{done:!1,value:e[t++]}};throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}function n(e){return c[e]}var e,t=(function(t){function e(){return{baseUrl:null,breaks:!1,gfm:!0,headerIds:!0,headerPrefix:"",highlight:null,langPrefix:"language-",mangle:!0,pedantic:!1,renderer:null,sanitize:!1,sanitizer:null,silent:!1,smartLists:!1,smartypants:!1,tokenizer:null,walkTokens:null,xhtml:!1}}t.exports={defaults:e(),getDefaults:e,changeDefaults:function(e){t.exports.defaults=e}}}(e={exports:{}}),e.exports),i=(t.defaults,t.getDefaults,t.changeDefaults,/[&<>"']/),a=/[&<>"']/g,l=/[<>"']|&(?!#?\w+;)/,o=/[<>"']|&(?!#?\w+;)/g,c={"&":"&","<":"<",">":">",'"':""","'":"'"};var h=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi;function u(e){return e.replace(h,function(e,t){return"colon"===(t=t.toLowerCase())?":":"#"===t.charAt(0)?"x"===t.charAt(1)?String.fromCharCode(parseInt(t.substring(2),16)):String.fromCharCode(+t.substring(1)):""})}var p=/(^|[^\[])\^/g;var f=/[^\w:]/g,d=/^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;var 
k={},b=/^[^:]+:\/*[^/]*$/,m=/^([^:]+:)[\s\S]*$/,x=/^([^:]+:\/*[^/]*)[\s\S]*$/;function w(e,t){k[" "+e]||(b.test(e)?k[" "+e]=e+"/":k[" "+e]=v(e,"/",!0));var n=-1===(e=k[" "+e]).indexOf(":");return"//"===t.substring(0,2)?n?t:e.replace(m,"$1")+t:"/"===t.charAt(0)?n?t:e.replace(x,"$1")+t:e+t}function v(e,t,n){var r=e.length;if(0===r)return"";for(var i=0;it)n.splice(t);else for(;n.length=r.length?e.slice(r.length):e}).join("\n")}(n,t[3]||"");return{type:"code",raw:n,lang:t[2]?t[2].trim():t[2],text:r}}},t.heading=function(e){var t=this.rules.block.heading.exec(e);if(t)return{type:"heading",raw:t[0],depth:t[1].length,text:t[2]}},t.nptable=function(e){var t=this.rules.block.nptable.exec(e);if(t){var n={type:"table",header:O(t[1].replace(/^ *| *\| *$/g,"")),align:t[2].replace(/^ *|\| *$/g,"").split(/ *\| */),cells:t[3]?t[3].replace(/\n$/,"").split("\n"):[],raw:t[0]};if(n.header.length===n.align.length){for(var r=n.align.length,i=0;i ?/gm,"");return{type:"blockquote",raw:t[0],text:n}}},t.list=function(e){var t=this.rules.block.list.exec(e);if(t){for(var n,r,i,s,a,l,o,c=t[0],h=t[2],u=1/i.test(r[0])&&(t=!1),!n&&/^<(pre|code|kbd|script)(\s|>)/i.test(r[0])?n=!0:n&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(r[0])&&(n=!1),{type:this.options.sanitize?"text":"html",raw:r[0],inLink:t,inRawBlock:n,text:this.options.sanitize?this.options.sanitizer?this.options.sanitizer(r[0]):C(r[0]):r[0]}},t.link=function(e){var t=this.rules.inline.link.exec(e);if(t){var n,r=j(t[2],"()");-1$/,"$1"))?s.replace(this.rules.inline._escapes,"$1"):s,title:a?a.replace(this.rules.inline._escapes,"$1"):a},t[0])}},t.reflink=function(e,t){var n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){var r=(n[2]||n[1]).replace(/\s+/g," ");if((r=t[r.toLowerCase()])&&r.href)return E(n,r,n[0]);var i=n[0].charAt(0);return{type:"text",raw:i,text:i}}},t.strong=function(e){var t=this.rules.inline.strong.exec(e);if(t)return{type:"strong",raw:t[0],text:t[4]||t[3]||t[2]||t[1]}},t.em=function(e){var 
t=this.rules.inline.em.exec(e);if(t)return{type:"em",raw:t[0],text:t[6]||t[5]||t[4]||t[3]||t[2]||t[1]}},t.codespan=function(e){var t=this.rules.inline.code.exec(e);if(t){var n=t[2].replace(/\n/g," "),r=/[^ ]/.test(n),i=n.startsWith(" ")&&n.endsWith(" ");return r&&i&&(n=n.substring(1,n.length-1)),n=C(n,!0),{type:"codespan",raw:t[0],text:n}}},t.br=function(e){var t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}},t.del=function(e){var t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[1]}},t.autolink=function(e,t){var n=this.rules.inline.autolink.exec(e);if(n){var r,i="@"===n[2]?"mailto:"+(r=C(this.options.mangle?t(n[1]):n[1])):r=C(n[1]);return{type:"link",raw:n[0],text:r,href:i,tokens:[{type:"text",raw:r,text:r}]}}},t.url=function(e,t){var n,r,i,s;if(n=this.rules.inline.url.exec(e)){if("@"===n[2])i="mailto:"+(r=C(this.options.mangle?t(n[0]):n[0]));else{for(;s=n[0],n[0]=this.rules.inline._backpedal.exec(n[0])[0],s!==n[0];);r=C(n[0]),i="www."===n[1]?"http://"+r:r}return{type:"link",raw:n[0],text:r,href:i,tokens:[{type:"text",raw:r,text:r}]}}},t.inlineText=function(e,t,n){var r=this.rules.inline.text.exec(e);if(r){var i=t?this.options.sanitize?this.options.sanitizer?this.options.sanitizer(r[0]):C(r[0]):r[0]:C(this.options.smartypants?n(r[0]):r[0]);return{type:"text",raw:r[0],text:i}}},e}(),L=S,P=z,U=A,B={newline:/^\n+/,code:/^( {4}[^\n]+\n*)+/,fences:/^ {0,3}(`{3,}(?=[^`\n]*\n)|~{3,})([^\n]*)\n(?:|([\s\S]*?)\n)(?: {0,3}\1[~`]* *(?:\n+|$)|$)/,hr:/^ {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\* *){3,})(?:\n+|$)/,heading:/^ {0,3}(#{1,6}) +([^\n]*?)(?: +#+)? *(?:\n+|$)/,blockquote:/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,list:/^( {0,3})(bull) [\s\S]+?(?:hr|def|\n{2,}(?! )(?!\1bull )\n*|\s*$)/,html:"^ {0,3}(?:<(script|pre|style)[\\s>][\\s\\S]*?(?:[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?\\?>\\n*|\\n*|\\n*|)[\\s\\S]*?(?:\\n{2,}|$)|<(?!script|pre|style)([a-z][\\w-]*)(?:attribute)*? 
*/?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:\\n{2,}|$)|(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:\\n{2,}|$))",def:/^ {0,3}\[(label)\]: *\n? *]+)>?(?:(?: +\n? *| *\n *)(title))? *(?:\n+|$)/,nptable:L,table:L,lheading:/^([^\n]+)\n {0,3}(=+|-+) *(?:\n+|$)/,_paragraph:/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html)[^\n]+)*)/,text:/^[^\n]+/,_label:/(?!\s*\])(?:\\[\[\]]|[^\[\]])+/,_title:/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/};B.def=P(B.def).replace("label",B._label).replace("title",B._title).getRegex(),B.bullet=/(?:[*+-]|\d{1,9}\.)/,B.item=/^( *)(bull) ?[^\n]*(?:\n(?!\1bull ?)[^\n]*)*/,B.item=P(B.item,"gm").replace(/bull/g,B.bullet).getRegex(),B.list=P(B.list).replace(/bull/g,B.bullet).replace("hr","\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))").replace("def","\\n+(?="+B.def.source+")").getRegex(),B._tag="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",B._comment=//,B.html=P(B.html,"i").replace("comment",B._comment).replace("tag",B._tag).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),B.paragraph=P(B._paragraph).replace("hr",B.hr).replace("heading"," {0,3}#{1,6} ").replace("|lheading","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|!--)").replace("tag",B._tag).getRegex(),B.blockquote=P(B.blockquote).replace("paragraph",B.paragraph).getRegex(),B.normal=U({},B),B.gfm=U({},B.normal,{nptable:"^ *([^|\\n ].*\\|.*)\\n *([-:]+ *\\|[-| :]*)(?:\\n((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)",table:"^ *\\|(.+)\\n 
*\\|?( *[-:]+[-| :]*)(?:\\n *((?:(?!\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)"}),B.gfm.nptable=P(B.gfm.nptable).replace("hr",B.hr).replace("heading"," {0,3}#{1,6} ").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|!--)").replace("tag",B._tag).getRegex(),B.gfm.table=P(B.gfm.table).replace("hr",B.hr).replace("heading"," {0,3}#{1,6} ").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|!--)").replace("tag",B._tag).getRegex(),B.pedantic=U({},B.normal,{html:P("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+? *(?:\\n{2,}|\\s*$)|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",B._comment).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,heading:/^ *(#{1,6}) *([^\n]+?) 
*(?:#+ *)?(?:\n+|$)/,fences:L,paragraph:P(B.normal._paragraph).replace("hr",B.hr).replace("heading"," *#{1,6} *[^\n]").replace("lheading",B.lheading).replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").getRegex()});var F={escape:/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,autolink:/^<(scheme:[^\s\x00-\x1f<>]*|email)>/,url:L,tag:"^comment|^|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^|^",link:/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/,reflink:/^!?\[(label)\]\[(?!\s*\])((?:\\[\[\]]?|[^\[\]\\])+)\]/,nolink:/^!?\[(?!\s*\])((?:\[[^\[\]]*\]|\\[\[\]]|[^\[\]])*)\](?:\[\])?/,strong:/^__([^\s_])__(?!_)|^\*\*([^\s*])\*\*(?!\*)|^__([^\s][\s\S]*?[^\s])__(?!_)|^\*\*([^\s][\s\S]*?[^\s])\*\*(?!\*)/,em:/^_([^\s_])_(?!_)|^_([^\s_<][\s\S]*?[^\s_])_(?!_|[^\s,punctuation])|^_([^\s_<][\s\S]*?[^\s])_(?!_|[^\s,punctuation])|^\*([^\s*<\[])\*(?!\*)|^\*([^\s<"][\s\S]*?[^\s\[\*])\*(?![\]`punctuation])|^\*([^\s*"<\[][\s\S]*[^\s])\*(?!\*)/,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,br:/^( 
{2,}|\\)\n(?!\s*$)/,del:L,text:/^(`+|[^`])(?:[\s\S]*?(?:(?=[\\?@\\[^_{|}~"};F.em=P(F.em).replace(/punctuation/g,F._punctuation).getRegex(),F._escapes=/\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/g,F._scheme=/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/,F._email=/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/,F.autolink=P(F.autolink).replace("scheme",F._scheme).replace("email",F._email).getRegex(),F._attribute=/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/,F.tag=P(F.tag).replace("comment",B._comment).replace("attribute",F._attribute).getRegex(),F._label=/(?:\[[^\[\]]*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,F._href=/<(?:\\[<>]?|[^\s<>\\])*>|[^\s\x00-\x1f]*/,F._title=/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/,F.link=P(F.link).replace("label",F._label).replace("href",F._href).replace("title",F._title).getRegex(),F.reflink=P(F.reflink).replace("label",F._label).getRegex(),F.normal=U({},F),F.pedantic=U({},F.normal,{strong:/^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,em:/^_(?=\S)([\s\S]*?\S)_(?!_)|^\*(?=\S)([\s\S]*?\S)\*(?!\*)/,link:P(/^!?\[(label)\]\((.*?)\)/).replace("label",F._label).getRegex(),reflink:P(/^!?\[(label)\]\s*\[([^\]]*)\]/).replace("label",F._label).getRegex()}),F.gfm=U({},F.normal,{escape:P(F.escape).replace("])","~|])").getRegex(),_extended_email:/[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,url:/^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,_backpedal:/(?:[^?!.,:;*_~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_~)]+(?!$))+/,del:/^~+(?=\S)([\s\S]*?\S)~+/,text:/^(`+|[^`])(?:[\s\S]*?(?:(?=[\\'+(n?e:Q(e,!0))+"\n":"
"+(n?e:Q(e,!0))+"
\n"},t.blockquote=function(e){return"
\n"+e+"
\n"},t.html=function(e){return e},t.heading=function(e,t,n,r){return this.options.headerIds?"'+e+"\n":""+e+"\n"},t.hr=function(){return this.options.xhtml?"
\n":"
\n"},t.list=function(e,t,n){var r=t?"ol":"ul";return"<"+r+(t&&1!==n?' start="'+n+'"':"")+">\n"+e+"\n"},t.listitem=function(e){return"
  • "+e+"
  • \n"},t.checkbox=function(e){return" "},t.paragraph=function(e){return"

    "+e+"

    \n"},t.table=function(e,t){return"\n\n"+e+"\n"+(t=t&&""+t+"")+"
    \n"},t.tablerow=function(e){return"\n"+e+"\n"},t.tablecell=function(e,t){var n=t.header?"th":"td";return(t.align?"<"+n+' align="'+t.align+'">':"<"+n+">")+e+"\n"},t.strong=function(e){return""+e+""},t.em=function(e){return""+e+""},t.codespan=function(e){return""+e+""},t.br=function(){return this.options.xhtml?"
    ":"
    "},t.del=function(e){return""+e+""},t.link=function(e,t,n){if(null===(e=K(this.options.sanitize,this.options.baseUrl,e)))return n;var r='"},t.image=function(e,t,n){if(null===(e=K(this.options.sanitize,this.options.baseUrl,e)))return n;var r=''+n+'":">"},t.text=function(e){return e},e}(),ee=function(){function e(){}var t=e.prototype;return t.strong=function(e){return e},t.em=function(e){return e},t.codespan=function(e){return e},t.del=function(e){return e},t.html=function(e){return e},t.text=function(e){return e},t.link=function(e,t,n){return""+n},t.image=function(e,t,n){return""+n},t.br=function(){return""},e}(),te=function(){function e(){this.seen={}}return e.prototype.slug=function(e){var t=e.toLowerCase().trim().replace(/<[!\/a-z].*?>/gi,"").replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g,"").replace(/\s/g,"-");if(this.seen.hasOwnProperty(t))for(var n=t;this.seen[n]++,t=n+"-"+this.seen[n],this.seen.hasOwnProperty(t););return this.seen[t]=0,t},e}(),ne=t.defaults,re=_,ie=function(){function n(e){this.options=e||ne,this.options.renderer=this.options.renderer||new Y,this.renderer=this.options.renderer,this.renderer.options=this.options,this.textRenderer=new ee,this.slugger=new te}n.parse=function(e,t){return new n(t).parse(e)};var e=n.prototype;return e.parse=function(e,t){void 0===t&&(t=!0);for(var n,r,i,s,a,l,o,c,h,u,p,g,f,d,k,b,m,x="",w=e.length,v=0;vAn error occurred:

    "+le(e.message+"",!0)+"
    ";throw e}}return ue.options=ue.setOptions=function(e){return se(ue.defaults,e),ce(ue.defaults),ue},ue.getDefaults=oe,ue.defaults=he,ue.use=function(l){var t,n=se({},l);l.renderer&&function(){var a=ue.defaults.renderer||new Y;for(var e in l.renderer)!function(i){var s=a[i];a[i]=function(){for(var e=arguments.length,t=new Array(e),n=0;n>>6:(a<65536?e[r++]=224|a>>>12:(e[r++]=240|a>>>18,e[r++]=128|a>>>12&63),e[r++]=128|a>>>6&63),e[r++]=128|63&a);return e},a.buf2binstring=function(t){return d(t,t.length)},a.binstring2buf=function(t){for(var e=new l.Buf8(t.length),a=0,i=e.length;a>10&1023,o[i++]=56320|1023&n)}return d(o,i)},a.utf8border=function(t,e){var a;for((e=e||t.length)>t.length&&(e=t.length),a=e-1;0<=a&&128==(192&t[a]);)a--;return a<0?e:0===a?e:a+h[t[a]]>e?a:e}},{"./common":3}],5:[function(t,e,a){"use strict";e.exports=function(t,e,a,i){for(var n=65535&t|0,r=t>>>16&65535|0,s=0;0!==a;){for(a-=s=2e3>>1:t>>>1;e[a]=t}return e}();e.exports=function(t,e,a,i){var n=o,r=i+a;t^=-1;for(var s=i;s>>8^n[255&(t^e[s])];return-1^t}},{}],8:[function(t,e,a){"use strict";var l,_=t("../utils/common"),h=t("./trees"),u=t("./adler32"),c=t("./crc32"),i=t("./messages"),d=0,f=4,b=0,g=-2,m=-1,w=4,n=2,p=8,v=9,r=286,s=30,o=19,k=2*r+1,y=15,x=3,z=258,B=z+x+1,S=42,E=113,A=1,Z=2,R=3,C=4;function N(t,e){return t.msg=i[e],e}function O(t){return(t<<1)-(4t.avail_out&&(a=t.avail_out),0!==a&&(_.arraySet(t.output,e.pending_buf,e.pending_out,a,t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))}function U(t,e){h._tr_flush_block(t,0<=t.block_start?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,I(t.strm)}function T(t,e){t.pending_buf[t.pending++]=e}function F(t,e){t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e}function L(t,e){var 
a,i,n=t.max_chain_length,r=t.strstart,s=t.prev_length,o=t.nice_match,l=t.strstart>t.w_size-B?t.strstart-(t.w_size-B):0,h=t.window,d=t.w_mask,f=t.prev,_=t.strstart+z,u=h[r+s-1],c=h[r+s];t.prev_length>=t.good_match&&(n>>=2),o>t.lookahead&&(o=t.lookahead);do{if(h[(a=e)+s]===c&&h[a+s-1]===u&&h[a]===h[r]&&h[++a]===h[r+1]){r+=2,a++;do{}while(h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&h[++r]===h[++a]&&r<_);if(i=z-(_-r),r=_-z,sl&&0!=--n);return s<=t.lookahead?s:t.lookahead}function H(t){var e,a,i,n,r,s,o,l,h,d,f=t.w_size;do{if(n=t.window_size-t.lookahead-t.strstart,t.strstart>=f+(f-B)){for(_.arraySet(t.window,t.window,f,f,0),t.match_start-=f,t.strstart-=f,t.block_start-=f,e=a=t.hash_size;i=t.head[--e],t.head[e]=f<=i?i-f:0,--a;);for(e=a=f;i=t.prev[--e],t.prev[e]=f<=i?i-f:0,--a;);n+=f}if(0===t.strm.avail_in)break;if(s=t.strm,o=t.window,l=t.strstart+t.lookahead,h=n,d=void 0,d=s.avail_in,h=x)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=(t.ins_h<=x&&(t.ins_h=(t.ins_h<=x)if(i=h._tr_tally(t,t.strstart-t.match_start,t.match_length-x),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=x){for(t.match_length--;t.strstart++,t.ins_h=(t.ins_h<=x&&(t.ins_h=(t.ins_h<=x&&t.match_length<=t.prev_length){for(n=t.strstart+t.lookahead-x,i=h._tr_tally(t,t.strstart-1-t.prev_match,t.prev_length-x),t.lookahead-=t.prev_length-1,t.prev_length-=2;++t.strstart<=n&&(t.ins_h=(t.ins_h<t.pending_buf_size-5&&(a=t.pending_buf_size-5);;){if(t.lookahead<=1){if(H(t),0===t.lookahead&&e===d)return A;if(0===t.lookahead)break}t.strstart+=t.lookahead,t.lookahead=0;var i=t.block_start+a;if((0===t.strstart||t.strstart>=i)&&(t.lookahead=t.strstart-i,t.strstart=i,U(t,!1),0===t.strm.avail_out))return A;if(t.strstart-t.block_start>=t.w_size-B&&(U(t,!1),0===t.strm.avail_out))return A}return t.insert=0,e===f?(U(t,!0),0===t.strm.avail_out?R:C):(t.strstart>t.block_start&&(U(t,!1),t.strm.avail_out),A)}),new 
M(4,4,8,4,j),new M(4,5,16,8,j),new M(4,6,32,32,j),new M(4,4,16,16,K),new M(8,16,32,32,K),new M(8,16,128,128,K),new M(8,32,128,256,K),new M(32,128,258,1024,K),new M(32,258,258,4096,K)],a.deflateInit=function(t,e){return G(t,e,p,15,8,0)},a.deflateInit2=G,a.deflateReset=q,a.deflateResetKeep=Y,a.deflateSetHeader=function(t,e){return t&&t.state?2!==t.state.wrap?g:(t.state.gzhead=e,b):g},a.deflate=function(t,e){var a,i,n,r;if(!t||!t.state||5>8&255),T(i,i.gzhead.time>>16&255),T(i,i.gzhead.time>>24&255),T(i,9===i.level?2:2<=i.strategy||i.level<2?4:0),T(i,255&i.gzhead.os),i.gzhead.extra&&i.gzhead.extra.length&&(T(i,255&i.gzhead.extra.length),T(i,i.gzhead.extra.length>>8&255)),i.gzhead.hcrc&&(t.adler=c(t.adler,i.pending_buf,i.pending,0)),i.gzindex=0,i.status=69):(T(i,0),T(i,0),T(i,0),T(i,0),T(i,0),T(i,9===i.level?2:2<=i.strategy||i.level<2?4:0),T(i,3),i.status=E);else{var s=p+(i.w_bits-8<<4)<<8;s|=(2<=i.strategy||i.level<2?0:i.level<6?1:6===i.level?2:3)<<6,0!==i.strstart&&(s|=32),s+=31-s%31,i.status=E,F(i,s),0!==i.strstart&&(F(i,t.adler>>>16),F(i,65535&t.adler)),t.adler=1}if(69===i.status)if(i.gzhead.extra){for(n=i.pending;i.gzindex<(65535&i.gzhead.extra.length)&&(i.pending!==i.pending_buf_size||(i.gzhead.hcrc&&i.pending>n&&(t.adler=c(t.adler,i.pending_buf,i.pending-n,n)),I(t),n=i.pending,i.pending!==i.pending_buf_size));)T(i,255&i.gzhead.extra[i.gzindex]),i.gzindex++;i.gzhead.hcrc&&i.pending>n&&(t.adler=c(t.adler,i.pending_buf,i.pending-n,n)),i.gzindex===i.gzhead.extra.length&&(i.gzindex=0,i.status=73)}else i.status=73;if(73===i.status)if(i.gzhead.name){n=i.pending;do{if(i.pending===i.pending_buf_size&&(i.gzhead.hcrc&&i.pending>n&&(t.adler=c(t.adler,i.pending_buf,i.pending-n,n)),I(t),n=i.pending,i.pending===i.pending_buf_size)){r=1;break}T(i,r=i.gzindexn&&(t.adler=c(t.adler,i.pending_buf,i.pending-n,n)),0===r&&(i.gzindex=0,i.status=91)}else 
i.status=91;if(91===i.status)if(i.gzhead.comment){n=i.pending;do{if(i.pending===i.pending_buf_size&&(i.gzhead.hcrc&&i.pending>n&&(t.adler=c(t.adler,i.pending_buf,i.pending-n,n)),I(t),n=i.pending,i.pending===i.pending_buf_size)){r=1;break}T(i,r=i.gzindexn&&(t.adler=c(t.adler,i.pending_buf,i.pending-n,n)),0===r&&(i.status=103)}else i.status=103;if(103===i.status&&(i.gzhead.hcrc?(i.pending+2>i.pending_buf_size&&I(t),i.pending+2<=i.pending_buf_size&&(T(i,255&t.adler),T(i,t.adler>>8&255),t.adler=0,i.status=E)):i.status=E),0!==i.pending){if(I(t),0===t.avail_out)return i.last_flush=-1,b}else if(0===t.avail_in&&O(e)<=O(a)&&e!==f)return N(t,-5);if(666===i.status&&0!==t.avail_in)return N(t,-5);if(0!==t.avail_in||0!==i.lookahead||e!==d&&666!==i.status){var o=2===i.strategy?function(t,e){for(var a;;){if(0===t.lookahead&&(H(t),0===t.lookahead)){if(e===d)return A;break}if(t.match_length=0,a=h._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(U(t,!1),0===t.strm.avail_out))return A}return t.insert=0,e===f?(U(t,!0),0===t.strm.avail_out?R:C):t.last_lit&&(U(t,!1),0===t.strm.avail_out)?A:Z}(i,e):3===i.strategy?function(t,e){for(var a,i,n,r,s=t.window;;){if(t.lookahead<=z){if(H(t),t.lookahead<=z&&e===d)return A;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=x&&0t.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=x?(a=h._tr_tally(t,1,t.match_length-x),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=h._tr_tally(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(U(t,!1),0===t.strm.avail_out))return A}return t.insert=0,e===f?(U(t,!0),0===t.strm.avail_out?R:C):t.last_lit&&(U(t,!1),0===t.strm.avail_out)?A:Z}(i,e):l[i.level].func(i,e);if(o!==R&&o!==C||(i.status=666),o===A||o===R)return 0===t.avail_out&&(i.last_flush=-1),b;if(o===Z&&(1===e?h._tr_align(i):5!==e&&(h._tr_stored_block(i,0,0,!1),3===e&&(D(i.head),0===i.lookahead&&(i.strstart=0,i.block_start=0,i.insert=0))),I(t),0===t.avail_out))return i.last_flush=-1,b}return 
e!==f?b:i.wrap<=0?1:(2===i.wrap?(T(i,255&t.adler),T(i,t.adler>>8&255),T(i,t.adler>>16&255),T(i,t.adler>>24&255),T(i,255&t.total_in),T(i,t.total_in>>8&255),T(i,t.total_in>>16&255),T(i,t.total_in>>24&255)):(F(i,t.adler>>>16),F(i,65535&t.adler)),I(t),0=a.w_size&&(0===r&&(D(a.head),a.strstart=0,a.block_start=0,a.insert=0),h=new _.Buf8(a.w_size),_.arraySet(h,e,d-a.w_size,a.w_size,0),e=h,d=a.w_size),s=t.avail_in,o=t.next_in,l=t.input,t.avail_in=d,t.next_in=0,t.input=e,H(a);a.lookahead>=x;){for(i=a.strstart,n=a.lookahead-(x-1);a.ins_h=(a.ins_h<>>=v=p>>>24,c-=v,0===(v=p>>>16&255))S[r++]=65535&p;else{if(!(16&v)){if(0==(64&v)){p=b[(65535&p)+(u&(1<>>=v,c-=v),c<15&&(u+=B[i++]<>>=v=p>>>24,c-=v,!(16&(v=p>>>16&255))){if(0==(64&v)){p=g[(65535&p)+(u&(1<>>=v,c-=v,(v=r-s)>3,u&=(1<<(c-=k<<3))-1,t.next_in=i,t.next_out=r,t.avail_in=i>>24&255)+(t>>>8&65280)+((65280&t)<<8)+((255&t)<<24)}function r(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new Z.Buf16(320),this.work=new Z.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function s(t){var e;return t&&t.state?(e=t.state,t.total_in=t.total_out=e.total=0,t.msg="",e.wrap&&(t.adler=1&e.wrap),e.mode=F,e.last=0,e.havedict=0,e.dmax=32768,e.head=null,e.hold=0,e.bits=0,e.lencode=e.lendyn=new Z.Buf32(i),e.distcode=e.distdyn=new Z.Buf32(n),e.sane=1,e.back=-1,U):T}function o(t){var e;return t&&t.state?((e=t.state).wsize=0,e.whave=0,e.wnext=0,s(t)):T}function l(t,e){var a,i;return 
t&&t.state?(i=t.state,e<0?(a=0,e=-e):(a=1+(e>>4),e<48&&(e&=15)),e&&(e<8||15=r.wsize?(Z.arraySet(r.window,e,a-r.wsize,r.wsize,0),r.wnext=0,r.whave=r.wsize):(i<(n=r.wsize-r.wnext)&&(n=i),Z.arraySet(r.window,e,a-i,n,r.wnext),(i-=n)?(Z.arraySet(r.window,e,a-i,i,0),r.wnext=i,r.whave=r.wsize):(r.wnext+=n,r.wnext===r.wsize&&(r.wnext=0),r.whave>>8&255,a.check=C(a.check,E,2,0),d=h=0,a.mode=2;break}if(a.flags=0,a.head&&(a.head.done=!1),!(1&a.wrap)||(((255&h)<<8)+(h>>8))%31){t.msg="incorrect header check",a.mode=30;break}if(8!=(15&h)){t.msg="unknown compression method",a.mode=30;break}if(d-=4,y=8+(15&(h>>>=4)),0===a.wbits)a.wbits=y;else if(y>a.wbits){t.msg="invalid window size",a.mode=30;break}a.dmax=1<>8&1),512&a.flags&&(E[0]=255&h,E[1]=h>>>8&255,a.check=C(a.check,E,2,0)),d=h=0,a.mode=3;case 3:for(;d<32;){if(0===o)break t;o--,h+=i[r++]<>>8&255,E[2]=h>>>16&255,E[3]=h>>>24&255,a.check=C(a.check,E,4,0)),d=h=0,a.mode=4;case 4:for(;d<16;){if(0===o)break t;o--,h+=i[r++]<>8),512&a.flags&&(E[0]=255&h,E[1]=h>>>8&255,a.check=C(a.check,E,2,0)),d=h=0,a.mode=5;case 5:if(1024&a.flags){for(;d<16;){if(0===o)break t;o--,h+=i[r++]<>>8&255,a.check=C(a.check,E,2,0)),d=h=0}else a.head&&(a.head.extra=null);a.mode=6;case 6:if(1024&a.flags&&(o<(u=a.length)&&(u=o),u&&(a.head&&(y=a.head.extra_len-a.length,a.head.extra||(a.head.extra=new Array(a.head.extra_len)),Z.arraySet(a.head.extra,i,r,u,y)),512&a.flags&&(a.check=C(a.check,i,u,r)),o-=u,r+=u,a.length-=u),a.length))break t;a.length=0,a.mode=7;case 7:if(2048&a.flags){if(0===o)break t;for(u=0;y=i[r+u++],a.head&&y&&a.length<65536&&(a.head.name+=String.fromCharCode(y)),y&&u>9&1,a.head.done=!0),t.adler=a.check=0,a.mode=12;break;case 10:for(;d<32;){if(0===o)break t;o--,h+=i[r++]<>>=7&d,d-=7&d,a.mode=27;break}for(;d<3;){if(0===o)break t;o--,h+=i[r++]<>>=1)){case 0:a.mode=14;break;case 1:if(H(a),a.mode=20,6!==e)break;h>>>=2,d-=2;break t;case 2:a.mode=17;break;case 3:t.msg="invalid block type",a.mode=30}h>>>=2,d-=2;break;case 
14:for(h>>>=7&d,d-=7&d;d<32;){if(0===o)break t;o--,h+=i[r++]<>>16^65535)){t.msg="invalid stored block lengths",a.mode=30;break}if(a.length=65535&h,d=h=0,a.mode=15,6===e)break t;case 15:a.mode=16;case 16:if(u=a.length){if(o>>=5,d-=5,a.ndist=1+(31&h),h>>>=5,d-=5,a.ncode=4+(15&h),h>>>=4,d-=4,286>>=3,d-=3}for(;a.have<19;)a.lens[A[a.have++]]=0;if(a.lencode=a.lendyn,a.lenbits=7,z={bits:a.lenbits},x=O(0,a.lens,0,19,a.lencode,0,a.work,z),a.lenbits=z.bits,x){t.msg="invalid code lengths set",a.mode=30;break}a.have=0,a.mode=19;case 19:for(;a.have>>16&255,w=65535&S,!((g=S>>>24)<=d);){if(0===o)break t;o--,h+=i[r++]<>>=g,d-=g,a.lens[a.have++]=w;else{if(16===w){for(B=g+2;d>>=g,d-=g,0===a.have){t.msg="invalid bit length repeat",a.mode=30;break}y=a.lens[a.have-1],u=3+(3&h),h>>>=2,d-=2}else if(17===w){for(B=g+3;d>>=g)),h>>>=3,d-=3}else{for(B=g+7;d>>=g)),h>>>=7,d-=7}if(a.have+u>a.nlen+a.ndist){t.msg="invalid bit length repeat",a.mode=30;break}for(;u--;)a.lens[a.have++]=y}}if(30===a.mode)break;if(0===a.lens[256]){t.msg="invalid code -- missing end-of-block",a.mode=30;break}if(a.lenbits=9,z={bits:a.lenbits},x=O(D,a.lens,0,a.nlen,a.lencode,0,a.work,z),a.lenbits=z.bits,x){t.msg="invalid literal/lengths set",a.mode=30;break}if(a.distbits=6,a.distcode=a.distdyn,z={bits:a.distbits},x=O(I,a.lens,a.nlen,a.ndist,a.distcode,0,a.work,z),a.distbits=z.bits,x){t.msg="invalid distances set",a.mode=30;break}if(a.mode=20,6===e)break t;case 20:a.mode=21;case 21:if(6<=o&&258<=l){t.next_out=s,t.avail_out=l,t.next_in=r,t.avail_in=o,a.hold=h,a.bits=d,N(t,_),s=t.next_out,n=t.output,l=t.avail_out,r=t.next_in,i=t.input,o=t.avail_in,h=a.hold,d=a.bits,12===a.mode&&(a.back=-1);break}for(a.back=0;m=(S=a.lencode[h&(1<>>16&255,w=65535&S,!((g=S>>>24)<=d);){if(0===o)break t;o--,h+=i[r++]<>p)])>>>16&255,w=65535&S,!(p+(g=S>>>24)<=d);){if(0===o)break t;o--,h+=i[r++]<>>=p,d-=p,a.back+=p}if(h>>>=g,d-=g,a.back+=g,a.length=w,0===m){a.mode=26;break}if(32&m){a.back=-1,a.mode=12;break}if(64&m){t.msg="invalid literal/length 
code",a.mode=30;break}a.extra=15&m,a.mode=22;case 22:if(a.extra){for(B=a.extra;d>>=a.extra,d-=a.extra,a.back+=a.extra}a.was=a.length,a.mode=23;case 23:for(;m=(S=a.distcode[h&(1<>>16&255,w=65535&S,!((g=S>>>24)<=d);){if(0===o)break t;o--,h+=i[r++]<>p)])>>>16&255,w=65535&S,!(p+(g=S>>>24)<=d);){if(0===o)break t;o--,h+=i[r++]<>>=p,d-=p,a.back+=p}if(h>>>=g,d-=g,a.back+=g,64&m){t.msg="invalid distance code",a.mode=30;break}a.offset=w,a.extra=15&m,a.mode=24;case 24:if(a.extra){for(B=a.extra;d>>=a.extra,d-=a.extra,a.back+=a.extra}if(a.offset>a.dmax){t.msg="invalid distance too far back",a.mode=30;break}a.mode=25;case 25:if(0===l)break t;if(u=_-l,a.offset>u){if((u=a.offset-u)>a.whave&&a.sane){t.msg="invalid distance too far back",a.mode=30;break}u>a.wnext?(u-=a.wnext,c=a.wsize-u):c=a.wnext-u,u>a.length&&(u=a.length),b=a.window}else b=n,c=s-a.offset,u=a.length;for(lu?(b=N[O+s[p]],g=A[Z+s[p]]):(b=96,g=0),l=1<>z)+(h-=l)]=c<<24|b<<16|g|0,0!==h;);for(l=1<>=1;if(0!==l?(E&=l-1,E+=l):E=0,p++,0==--R[w]){if(w===k)break;w=e[a+s[p]]}if(y>>7)]}function T(t,e){t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255}function F(t,e,a){t.bi_valid>n-a?(t.bi_buf|=e<>n-t.bi_valid,t.bi_valid+=a-n):(t.bi_buf|=e<>>=1,a<<=1,0<--e;);return a>>>1}function j(t,e,a){var i,n,r=new Array(m+1),s=0;for(i=1;i<=m;i++)r[i]=s=s+a[i-1]<<1;for(n=0;n<=e;n++){var o=t[2*n+1];0!==o&&(t[2*n]=H(r[o]++,o))}}function K(t){var e;for(e=0;e<_;e++)t.dyn_ltree[2*e]=0;for(e=0;e>1;1<=a;a--)Y(t,r,a);for(n=l;a=t.heap[1],t.heap[1]=t.heap[t.heap_len--],Y(t,r,1),i=t.heap[1],t.heap[--t.heap_max]=a,t.heap[--t.heap_max]=i,r[2*n]=r[2*a]+r[2*i],t.depth[n]=(t.depth[a]>=t.depth[i]?t.depth[a]:t.depth[i])+1,r[2*a+1]=r[2*i+1]=n,t.heap[1]=n++,Y(t,r,1),2<=t.heap_len;);t.heap[--t.heap_max]=t.heap[1],function(t,e){var 
a,i,n,r,s,o,l=e.dyn_tree,h=e.max_code,d=e.stat_desc.static_tree,f=e.stat_desc.has_stree,_=e.stat_desc.extra_bits,u=e.stat_desc.extra_base,c=e.stat_desc.max_length,b=0;for(r=0;r<=m;r++)t.bl_count[r]=0;for(l[2*t.heap[t.heap_max]+1]=0,a=t.heap_max+1;a>=7;i>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return o;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return h;for(e=32;e>>3,(r=t.static_len+3+7>>>3)<=n&&(n=r)):n=r=a+5,a+4<=n&&-1!==e?Q(t,e,a,i):4===t.strategy||r===n?(F(t,2+(i?1:0),3),q(t,S,E)):(F(t,4+(i?1:0),3),function(t,e,a,i){var n;for(F(t,e-257,5),F(t,a-1,5),F(t,i-4,4),n=0;n>>8&255,t.pending_buf[t.d_buf+2*t.last_lit+1]=255&e,t.pending_buf[t.l_buf+t.last_lit]=255&a,t.last_lit++,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(Z[a]+f+1)]++,t.dyn_dtree[2*U(e)]++),t.last_lit===t.lit_bufsize-1},a._tr_align=function(t){var e;F(t,2,3),L(t,w,S),16===(e=t).bi_valid?(T(e,e.bi_buf),e.bi_buf=0,e.bi_valid=0):8<=e.bi_valid&&(e.pending_buf[e.pending++]=255&e.bi_buf,e.bi_buf>>=8,e.bi_valid-=8)}},{"../utils/common":3}],15:[function(t,e,a){"use strict";e.exports=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}},{}],"/":[function(t,e,a){"use strict";var i={};(0,t("./lib/utils/common").assign)(i,t("./lib/deflate"),t("./lib/inflate"),t("./lib/zlib/constants")),e.exports=i},{"./lib/deflate":1,"./lib/inflate":2,"./lib/utils/common":3,"./lib/zlib/constants":6}]},{},[])("/")}); diff --git a/frontend/packages/core/public/netron/deps/protobuf.min.js b/frontend/packages/core/public/netron/deps/protobuf.min.js new file mode 100644 index 00000000..9be32b62 --- /dev/null +++ b/frontend/packages/core/public/netron/deps/protobuf.min.js @@ -0,0 +1,8 @@ +/*! 
+ * protobuf.js v6.8.8 (c) 2016, daniel wirtz + * compiled thu, 19 jul 2018 00:33:26 utc + * licensed under the bsd-3-clause license + * see: https://github.com/dcodeio/protobuf.js for details + */ +!function(tt){"use strict";var r,e,t,i;r={1:[function(t,i){i.exports=function(t,i){var n=Array(arguments.length-1),s=0,r=2,u=!0;for(;r>2],r=(3&f)<<4,o=1;break;case 1:s[u++]=h[r|f>>4],r=(15&f)<<2,o=2;break;case 2:s[u++]=h[r|f>>6],s[u++]=h[63&f],o=0}8191>4,r=o,s=2;break;case 2:i[n++]=(15&r)<<4|(60&o)>>2,r=o,s=3;break;case 3:i[n++]=(3&r)<<6|o,s=0}}if(1===s)throw Error(a);return n-e},r.test=function(t){return/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/.test(t)}},{}],3:[function(t,i){function c(i,n){"string"==typeof i&&(n=i,i=tt);var f=[];function h(t){if("string"!=typeof t){var i=a();if(c.verbose&&console.log("codegen: "+i),i="return "+i,t){for(var n=Object.keys(t),r=Array(n.length+1),e=Array(n.length),s=0;s>>0,n,r);else if(i<11754943508222875e-54)t((e<<31|Math.round(i/1401298464324817e-60))>>>0,n,r);else{var s=Math.floor(Math.log(i)/Math.LN2);t((e<<31|s+127<<23|8388607&Math.round(i*Math.pow(2,-s)*8388608))>>>0,n,r)}}function i(t,i,n){var r=t(i,n),e=2*(r>>31)+1,s=r>>>23&255,u=8388607&r;return 255===s?u?NaN:e*(1/0):0===s?1401298464324817e-60*e*u:e*Math.pow(2,s-150)*(u+8388608)}o.writeFloatLE=t.bind(null,r),o.writeFloatBE=t.bind(null,e),o.readFloatLE=i.bind(null,s),o.readFloatBE=i.bind(null,u)}(),"undefined"!=typeof Float64Array?function(){var r=new Float64Array([-0]),e=new Uint8Array(r.buffer),t=128===e[7];function i(t,i,n){r[0]=t,i[n]=e[0],i[n+1]=e[1],i[n+2]=e[2],i[n+3]=e[3],i[n+4]=e[4],i[n+5]=e[5],i[n+6]=e[6],i[n+7]=e[7]}function n(t,i,n){r[0]=t,i[n]=e[7],i[n+1]=e[6],i[n+2]=e[5],i[n+3]=e[4],i[n+4]=e[3],i[n+5]=e[2],i[n+6]=e[1],i[n+7]=e[0]}function s(t,i){return e[0]=t[i],e[1]=t[i+1],e[2]=t[i+2],e[3]=t[i+3],e[4]=t[i+4],e[5]=t[i+5],e[6]=t[i+6],e[7]=t[i+7],r[0]}function u(t,i){return 
e[7]=t[i],e[6]=t[i+1],e[5]=t[i+2],e[4]=t[i+3],e[3]=t[i+4],e[2]=t[i+5],e[1]=t[i+6],e[0]=t[i+7],r[0]}o.writeDoubleLE=t?i:n,o.writeDoubleBE=t?n:i,o.readDoubleLE=t?s:u,o.readDoubleBE=t?u:s}():function(){function t(t,i,n,r,e,s){var u=r<0?1:0;if(u&&(r=-r),0===r)t(0,e,s+i),t(0<1/r?0:2147483648,e,s+n);else if(isNaN(r))t(0,e,s+i),t(2146959360,e,s+n);else if(17976931348623157e292>>0,e,s+n);else{var o;if(r<22250738585072014e-324)t((o=r/5e-324)>>>0,e,s+i),t((u<<31|o/4294967296)>>>0,e,s+n);else{var f=Math.floor(Math.log(r)/Math.LN2);1024===f&&(f=1023),t(4503599627370496*(o=r*Math.pow(2,-f))>>>0,e,s+i),t((u<<31|f+1023<<20|1048576*o&1048575)>>>0,e,s+n)}}}function i(t,i,n,r,e){var s=t(r,e+i),u=t(r,e+n),o=2*(u>>31)+1,f=u>>>20&2047,h=4294967296*(1048575&u)+s;return 2047===f?h?NaN:o*(1/0):0===f?5e-324*o*h:o*Math.pow(2,f-1075)*(h+4503599627370496)}o.writeDoubleLE=t.bind(null,r,0,4),o.writeDoubleBE=t.bind(null,e,4,0),o.readDoubleLE=i.bind(null,s,0,4),o.readDoubleBE=i.bind(null,u,4,0)}(),o}function r(t,i,n){i[n]=255&t,i[n+1]=t>>>8&255,i[n+2]=t>>>16&255,i[n+3]=t>>>24}function e(t,i,n){i[n]=t>>>24,i[n+1]=t>>>16&255,i[n+2]=t>>>8&255,i[n+3]=255&t}function s(t,i){return(t[i]|t[i+1]<<8|t[i+2]<<16|t[i+3]<<24)>>>0}function u(t,i){return(t[i]<<24|t[i+1]<<16|t[i+2]<<8|t[i+3])>>>0}i.exports=n(n)},{}],7:[function(t,i,n){function r(t){try{var i=eval("require")(t);if(i&&(i.length||Object.keys(i).length))return i}catch(t){}return null}i.exports=r},{}],8:[function(t,i,n){var r=n,s=r.isAbsolute=function(t){return/^(?:\/|\w+:)/.test(t)},e=r.normalize=function(t){var i=(t=t.replace(/\\/g,"/").replace(/\/{2,}/g,"/")).split("/"),n=s(t),r="";n&&(r=i.shift()+"/");for(var e=0;e>>1,u=null,o=e;return function(t){if(t<1||s>10),s[u++]=56320+(1023&r)):s[u++]=(15&r)<<12|(63&t[i++])<<6|63&t[i++],8191>6|192:(55296==(64512&r)&&56320==(64512&(e=t.charCodeAt(u+1)))?(r=65536+((1023&r)<<10)+(1023&e),++u,i[n++]=r>>18|240,i[n++]=r>>12&63|128):i[n++]=r>>12|224,i[n++]=r>>6&63|128),i[n++]=63&r|128);return 
n-s}},{}],11:[function(t,i){i.exports=e;var n,r=/\/|\./;function e(t,i){r.test(t)||(t="google/protobuf/"+t+".proto",i={nested:{google:{nested:{protobuf:{nested:i}}}}}),e[t]=i}e("any",{Any:{fields:{type_url:{type:"string",id:1},value:{type:"bytes",id:2}}}}),e("duration",{Duration:n={fields:{seconds:{type:"int64",id:1},nanos:{type:"int32",id:2}}}}),e("timestamp",{Timestamp:n}),e("empty",{Empty:{fields:{}}}),e("struct",{Struct:{fields:{fields:{keyType:"string",type:"Value",id:1}}},Value:{oneofs:{kind:{oneof:["nullValue","numberValue","stringValue","boolValue","structValue","listValue"]}},fields:{nullValue:{type:"NullValue",id:1},numberValue:{type:"double",id:2},stringValue:{type:"string",id:3},boolValue:{type:"bool",id:4},structValue:{type:"Struct",id:5},listValue:{type:"ListValue",id:6}}},NullValue:{values:{NULL_VALUE:0}},ListValue:{fields:{values:{rule:"repeated",type:"Value",id:1}}}}),e("wrappers",{DoubleValue:{fields:{value:{type:"double",id:1}}},FloatValue:{fields:{value:{type:"float",id:1}}},Int64Value:{fields:{value:{type:"int64",id:1}}},UInt64Value:{fields:{value:{type:"uint64",id:1}}},Int32Value:{fields:{value:{type:"int32",id:1}}},UInt32Value:{fields:{value:{type:"uint32",id:1}}},BoolValue:{fields:{value:{type:"bool",id:1}}},StringValue:{fields:{value:{type:"string",id:1}}},BytesValue:{fields:{value:{type:"bytes",id:1}}}}),e("field_mask",{FieldMask:{fields:{paths:{rule:"repeated",type:"string",id:1}}}}),e.get=function(t){return e[t]||null}},{}],12:[function(t,i,n){var r=n,l=t(15),v=t(37);function u(t,i,n,r){if(i.resolvedType)if(i.resolvedType instanceof l){t("switch(d%s){",r);for(var e=i.resolvedType.values,s=Object.keys(e),u=0;u>>0",r,r);break;case"int32":case"sint32":case"sfixed32":t("m%s=d%s|0",r,r);break;case"uint64":o=!0;case"int64":case"sint64":case"fixed64":case"sfixed64":t("if(util.Long)")("(m%s=util.Long.fromValue(d%s)).unsigned=%j",r,r,o)('else if(typeof d%s==="string")',r)("m%s=parseInt(d%s,10)",r,r)('else if(typeof 
d%s==="number")',r)("m%s=d%s",r,r)('else if(typeof d%s==="object")',r)("m%s=new util.LongBits(d%s.low>>>0,d%s.high>>>0).toNumber(%s)",r,r,r,o?"true":"");break;case"bytes":t('if(typeof d%s==="string")',r)("util.base64.decode(d%s,m%s=util.newBuffer(util.base64.length(d%s)),0)",r,r,r)("else if(d%s.length)",r)("m%s=d%s",r,r);break;case"string":t("m%s=String(d%s)",r,r);break;case"bool":t("m%s=Boolean(d%s)",r,r)}}return t}function d(t,i,n,r){if(i.resolvedType)i.resolvedType instanceof l?t("d%s=o.enums===String?types[%i].values[m%s]:m%s",r,n,r,r):t("d%s=types[%i].toObject(m%s,o)",r,n,r);else{var e=!1;switch(i.type){case"double":case"float":t("d%s=o.json&&!isFinite(m%s)?String(m%s):m%s",r,r,r,r);break;case"uint64":e=!0;case"int64":case"sint64":case"fixed64":case"sfixed64":t('if(typeof m%s==="number")',r)("d%s=o.longs===String?String(m%s):m%s",r,r,r)("else")("d%s=o.longs===String?util.Long.prototype.toString.call(m%s):o.longs===Number?new util.LongBits(m%s.low>>>0,m%s.high>>>0).toNumber(%s):m%s",r,r,r,r,e?"true":"",r);break;case"bytes":t("d%s=o.bytes===String?util.base64.encode(m%s,0,m%s.length):o.bytes===Array?Array.prototype.slice.call(m%s):m%s",r,r,r,r,r);break;default:t("d%s=m%s",r,r)}}return t}r.fromObject=function(t){var i=t.fieldsArray,n=v.codegen(["d"],t.name+"$fromObject")("if(d instanceof this.ctor)")("return d");if(!i.length)return n("return new this.ctor");n("var m=new this.ctor");for(var r=0;r>>3){");for(var n=0;n>>0,8|a.mapKey[s.keyType],s.keyType),f===tt?n("types[%i].encode(%s[ks[i]],w.uint32(18).fork()).ldelim().ldelim()",u,i):n(".uint32(%i).%s(%s[ks[i]]).ldelim()",16|f,o,i),n("}")("}")):s.repeated?(n("if(%s!=null&&%s.length){",i,i),s.packed&&a.packed[o]!==tt?n("w.uint32(%i).fork()",(s.id<<3|2)>>>0)("for(var i=0;i<%s.length;++i)",i)("w.%s(%s[i])",o,i)("w.ldelim()"):(n("for(var 
i=0;i<%s.length;++i)",i),f===tt?l(n,s,u,i+"[i]"):n("w.uint32(%i).%s(%s[i])",(s.id<<3|f)>>>0,o,i)),n("}")):(s.optional&&n("if(%s!=null&&m.hasOwnProperty(%j))",i,s.name),f===tt?l(n,s,u,i):n("w.uint32(%i).%s(%s)",(s.id<<3|f)>>>0,o,i))}return n("return w")};var h=t(15),a=t(36),c=t(37);function l(t,i,n,r){return i.resolvedType.group?t("types[%i].encode(%s,w.uint32(%i)).uint32(%i)",n,r,(i.id<<3|3)>>>0,(i.id<<3|4)>>>0):t("types[%i].encode(%s,w.uint32(%i).fork()).ldelim()",n,r,(i.id<<3|2)>>>0)}},{15:15,36:36,37:37}],15:[function(t,i){i.exports=e;var o=t(24);((e.prototype=Object.create(o.prototype)).constructor=e).className="Enum";var n=t(23),r=t(37);function e(t,i,n,r,e){if(o.call(this,t,n),i&&"object"!=typeof i)throw TypeError("values must be an object");if(this.valuesById={},this.values=Object.create(this.valuesById),this.comment=r,this.comments=e||{},this.reserved=tt,i)for(var s=Object.keys(i),u=0;u=i)return!0;return!1},h.isReservedName=function(t,i){if(t)for(var n=0;n");var r=h();if(!G.test(r))throw b(r,"name");l("=");var e=new U(y(r),k(h()),i,n);S(e,function(t){if("option"!==t)throw b(t);N(e,t),l(";")},function(){$(e)}),t.add(e)}(n);break;case"required":case"optional":case"repeated":T(n,t);break;case"oneof":!function(t,i){if(!G.test(i=h()))throw b(i,"name");var n=new _(y(i));S(n,function(t){"option"===t?(N(n,t),l(";")):(a(t),T(n,"optional"))}),t.add(n)}(n,t);break;case"extensions":j(n.extensions||(n.extensions=[]));break;case"reserved":j(n.reserved||(n.reserved=[]),!0);break;default:if(!p||!K.test(t))throw b(t);a(t),T(n,"optional")}}),t.add(n)}(t,i),!0;case"enum":return function(t,i){if(!G.test(i=h()))throw b(i,"name");var n=new q(i);S(n,function(t){switch(t){case"option":N(n,t),l(";");break;case"reserved":j(n.reserved||(n.reserved=[]),!0);break;default:!function(t,i){if(!G.test(i))throw b(i,"name");l("=");var n=k(h(),!0),r={};S(r,function(t){if("option"!==t)throw 
b(t);N(r,t),l(";")},function(){$(r)}),t.add(i,n,r.comment)}(n,t)}}),t.add(n)}(t,i),!0;case"service":return function(t,i){if(!G.test(i=h()))throw b(i,"service name");var n=new R(i);S(n,function(t){if(!x(n,t)){if("rpc"!==t)throw b(t);!function(t,i){var n=i;if(!G.test(i=h()))throw b(i,"name");var r,e,s,u,o=i;l("("),l("stream",!0)&&(e=!0);if(!K.test(i=h()))throw b(i);r=i,l(")"),l("returns"),l("("),l("stream",!0)&&(u=!0);if(!K.test(i=h()))throw b(i);s=i,l(")");var f=new z(o,n,r,s,e,u);S(f,function(t){if("option"!==t)throw b(t);N(f,t),l(";")}),t.add(f)}(n,t)}}),t.add(n)}(t,i),!0;case"extend":return function(i,t){if(!K.test(t=h()))throw b(t,"reference");var n=t;S(null,function(t){switch(t){case"required":case"repeated":case"optional":T(i,t,n);break;default:if(!p||!K.test(t))throw b(t);a(t),T(i,"optional",n)}})}(t,i),!0}return!1}function S(t,i,n){var r=f.line;if(t&&(t.comment=v(),t.filename=Y.filename),l("{",!0)){for(var e;"}"!==(e=h());)i(e);l(";",!0)}else n&&n(),l(";"),t&&"string"!=typeof t.comment&&(t.comment=v(r))}function T(t,i,n){var r=h();if("group"!==r){if(!K.test(r))throw b(r,"type");var e=h();if(!G.test(e))throw b(e,"name");e=y(e),l("=");var s=new L(e,k(h()),r,i,n);S(s,function(t){if("option"!==t)throw b(t);N(s,t),l(";")},function(){$(s)}),t.add(s),p||!s.repeated||Z.packed[r]===tt&&Z.basic[r]!==tt||s.setOption("packed",!1,!0)}else!function(t,i){var n=h();if(!G.test(n))throw b(n,"name");var r=B.lcFirst(n);n===r&&(n=B.ucFirst(n));l("=");var e=k(h()),s=new F(n);s.group=!0;var u=new L(r,e,n,i);u.filename=Y.filename,S(s,function(t){switch(t){case"option":N(s,t),l(";");break;case"required":case"optional":case"repeated":T(s,t);break;default:throw b(t)}}),t.add(s).add(u)}(t,i)}function N(t,i){var n=l("(",!0);if(!K.test(i=h()))throw b(i,"name");var r=i;n&&(l(")"),r="("+r+")",i=c(),Q.test(i)&&(r+=i,h())),l("="),function t(i,n){if(l("{",!0))do{if(!G.test(o=h()))throw 
b(o,"name");"{"===c()?t(i,n+"."+o):(l(":"),"{"===c()?t(i,n+"."+o):V(i,n+"."+o,g(!0))),l(",",!0)}while(!l("}",!0));else V(i,n,g(!0))}(t,r)}function V(t,i,n){t.setOption&&t.setOption(i,n)}function $(t){if(l("[",!0)){for(;N(t,"option"),l(",",!0););l("]")}return t}for(;null!==(o=h());)switch(o){case"package":if(!d)throw b(o);E();break;case"import":if(!d)throw b(o);O();break;case"syntax":if(!d)throw b(o);A();break;case"option":if(!d)throw b(o);N(w,o),l(";");break;default:if(x(w,o)){d=!1;continue}throw b(o)}return Y.filename=null,{package:r,imports:e,weakImports:s,syntax:u,root:i}}},{15:15,16:16,20:20,22:22,25:25,29:29,33:33,34:34,35:35,36:36,37:37}],27:[function(t,i){i.exports=o;var n,r=t(39),e=r.LongBits,s=r.utf8;function u(t,i){return RangeError("index out of range: "+t.pos+" + "+(i||1)+" > "+t.len)}function o(t){this.buf=t,this.pos=0,this.len=t.length}var f,h="undefined"!=typeof Uint8Array?function(t){if(t instanceof Uint8Array||Array.isArray(t))return new o(t);throw Error("illegal buffer")}:function(t){if(Array.isArray(t))return new o(t);throw Error("illegal buffer")};function a(){var t=new e(0,0),i=0;if(!(4=this.len)throw u(this);if(t.lo=(t.lo|(127&this.buf[this.pos])<<7*i)>>>0,this.buf[this.pos++]<128)return t}return t.lo=(t.lo|(127&this.buf[this.pos++])<<7*i)>>>0,t}for(;i<4;++i)if(t.lo=(t.lo|(127&this.buf[this.pos])<<7*i)>>>0,this.buf[this.pos++]<128)return t;if(t.lo=(t.lo|(127&this.buf[this.pos])<<28)>>>0,t.hi=(t.hi|(127&this.buf[this.pos])>>4)>>>0,this.buf[this.pos++]<128)return t;if(i=0,4>>0,this.buf[this.pos++]<128)return t}else for(;i<5;++i){if(this.pos>=this.len)throw u(this);if(t.hi=(t.hi|(127&this.buf[this.pos])<<7*i+3)>>>0,this.buf[this.pos++]<128)return t}throw Error("invalid varint encoding")}function c(t,i){return(t[i-4]|t[i-3]<<8|t[i-2]<<16|t[i-1]<<24)>>>0}function l(){if(this.pos+8>this.len)throw u(this,8);return new e(c(this.buf,this.pos+=4),c(this.buf,this.pos+=4))}o.create=r.Buffer?function(t){return(o.create=function(t){return 
r.Buffer.isBuffer(t)?new n(t):h(t)})(t)}:h,o.prototype.c=r.Array.prototype.subarray||r.Array.prototype.slice,o.prototype.uint32=(f=4294967295,function(){if(f=(127&this.buf[this.pos])>>>0,this.buf[this.pos++]<128)return f;if(f=(f|(127&this.buf[this.pos])<<7)>>>0,this.buf[this.pos++]<128)return f;if(f=(f|(127&this.buf[this.pos])<<14)>>>0,this.buf[this.pos++]<128)return f;if(f=(f|(127&this.buf[this.pos])<<21)>>>0,this.buf[this.pos++]<128)return f;if(f=(f|(15&this.buf[this.pos])<<28)>>>0,this.buf[this.pos++]<128)return f;if((this.pos+=5)>this.len)throw this.pos=this.len,u(this,10);return f}),o.prototype.int32=function(){return 0|this.uint32()},o.prototype.sint32=function(){var t=this.uint32();return t>>>1^-(1&t)|0},o.prototype.bool=function(){return 0!==this.uint32()},o.prototype.fixed32=function(){if(this.pos+4>this.len)throw u(this,4);return c(this.buf,this.pos+=4)},o.prototype.sfixed32=function(){if(this.pos+4>this.len)throw u(this,4);return 0|c(this.buf,this.pos+=4)},o.prototype.float=function(){if(this.pos+4>this.len)throw u(this,4);var t=r.float.readFloatLE(this.buf,this.pos);return this.pos+=4,t},o.prototype.double=function(){if(this.pos+8>this.len)throw u(this,4);var t=r.float.readDoubleLE(this.buf,this.pos);return this.pos+=8,t},o.prototype.bytes=function(){var t=this.uint32(),i=this.pos,n=this.pos+t;if(n>this.len)throw u(this,t);return this.pos+=t,Array.isArray(this.buf)?this.buf.slice(i,n):i===n?new this.buf.constructor(0):this.c.call(this.buf,i,n)},o.prototype.string=function(){var t=this.bytes();return s.read(t,0,t.length)},o.prototype.skip=function(t){if("number"==typeof t){if(this.pos+t>this.len)throw u(this,t);this.pos+=t}else do{if(this.pos>=this.len)throw u(this)}while(128&this.buf[this.pos++]);return this},o.prototype.skipType=function(t){switch(t){case 0:this.skip();break;case 1:this.skip(8);break;case 2:this.skip(this.uint32());break;case 3:for(;4!=(t=7&this.uint32());)this.skipType(t);break;case 5:this.skip(4);break;default:throw Error("invalid 
wire type "+t+" at offset "+this.pos)}return this},o.o=function(t){n=t;var i=r.Long?"toLong":"toNumber";r.merge(o.prototype,{int64:function(){return a.call(this)[i](!1)},uint64:function(){return a.call(this)[i](!0)},sint64:function(){return a.call(this).zzDecode()[i](!1)},fixed64:function(){return l.call(this)[i](!0)},sfixed64:function(){return l.call(this)[i](!1)}})}},{39:39}],28:[function(t,i){i.exports=e;var n=t(27);(e.prototype=Object.create(n.prototype)).constructor=e;var r=t(39);function e(t){n.call(this,t)}r.Buffer&&(e.prototype.c=r.Buffer.prototype.slice),e.prototype.string=function(){var t=this.uint32();return this.buf.utf8Slice(this.pos,this.pos=Math.min(this.pos+t,this.len))}},{27:27,39:39}],29:[function(t,i){i.exports=n;var r=t(23);((n.prototype=Object.create(r.prototype)).constructor=n).className="Root";var e,v,d,s=t(16),u=t(15),o=t(25),p=t(37);function n(t){r.call(this,"",t),this.deferred=[],this.files=[]}function w(){}n.fromJSON=function(t,i){return i||(i=new n),t.options&&i.setOptions(t.options),i.addJSON(t.nested)},n.prototype.resolvePath=p.path.resolve,n.prototype.load=function t(i,s,u){"function"==typeof s&&(u=s,s=tt);var o=this;if(!u)return p.asPromise(t,o,i,s);var f=u===w;function h(t,i){if(u){var n=u;if(u=null,f)throw t;n(t,i)}}function a(t,i){try{if(p.isString(i)&&"{"===i.charAt(0)&&(i=JSON.parse(i)),p.isString(i)){v.filename=t;var n,r=v(i,o,s),e=0;if(r.imports)for(;e]/g,O=/(?:"([^"\\]*(?:\\.[^"\\]*)*)")/g,A=/(?:'([^'\\]*(?:\\.[^'\\]*)*)')/g,x=/^ *[*/]+ */,S=/^\s*\*?\/*/,T=/\n/g,N=/\s/,n=/\\(.?)/g,r={0:"\0",r:"\r",n:"\n",t:"\t"};function V(t){return t.replace(n,function(t,i){switch(i){case"\\":case"":return i;default:return r[i]||""}})}function e(o,f){o=o.toString();var h=0,a=o.length,c=1,u=null,l=null,v=0,d=!1,p=[],w=null;function y(t){return Error("illegal "+t+" (line "+c+")")}function b(t){return o.charAt(t)}function m(t,i){u=o.charAt(t++),v=c,d=!1;var n,r=t-(f?2:3);do{if(--r<0||"\n"===(n=o.charAt(r))){d=!0;break}}while(" 
"===n||"\t"===n);for(var e=o.substring(t,i).split(T),s=0;s>>0,this.hi=i>>>0}var s=e.zero=new e(0,0);s.toNumber=function(){return 0},s.zzEncode=s.zzDecode=function(){return this},s.length=function(){return 1};var r=e.zeroHash="\0\0\0\0\0\0\0\0";e.fromNumber=function(t){if(0===t)return s;var i=t<0;i&&(t=-t);var n=t>>>0,r=(t-n)/4294967296>>>0;return i&&(r=~r>>>0,n=~n>>>0,4294967295<++n&&(n=0,4294967295<++r&&(r=0))),new e(n,r)},e.from=function(t){if("number"==typeof t)return e.fromNumber(t);if(n.isString(t)){if(!n.Long)return e.fromNumber(parseInt(t,10));t=n.Long.fromString(t)}return t.low||t.high?new e(t.low>>>0,t.high>>>0):s},e.prototype.toNumber=function(t){if(!t&&this.hi>>>31){var i=1+~this.lo>>>0,n=~this.hi>>>0;return i||(n=n+1>>>0),-(i+4294967296*n)}return this.lo+4294967296*this.hi},e.prototype.toLong=function(t){return n.Long?new n.Long(0|this.lo,0|this.hi,!!t):{low:0|this.lo,high:0|this.hi,unsigned:!!t}};var u=String.prototype.charCodeAt;e.fromHash=function(t){return t===r?s:new e((u.call(t,0)|u.call(t,1)<<8|u.call(t,2)<<16|u.call(t,3)<<24)>>>0,(u.call(t,4)|u.call(t,5)<<8|u.call(t,6)<<16|u.call(t,7)<<24)>>>0)},e.prototype.toHash=function(){return String.fromCharCode(255&this.lo,this.lo>>>8&255,this.lo>>>16&255,this.lo>>>24,255&this.hi,this.hi>>>8&255,this.hi>>>16&255,this.hi>>>24)},e.prototype.zzEncode=function(){var t=this.hi>>31;return this.hi=((this.hi<<1|this.lo>>>31)^t)>>>0,this.lo=(this.lo<<1^t)>>>0,this},e.prototype.zzDecode=function(){var t=-(1&this.lo);return this.lo=((this.lo>>>1|this.hi<<31)^t)>>>0,this.hi=(this.hi>>>1^t)>>>0,this},e.prototype.length=function(){var t=this.lo,i=(this.lo>>>28|this.hi<<4)>>>0,n=this.hi>>>24;return 0===n?0===i?t<16384?t<128?1:2:t<2097152?3:4:i<16384?i<128?5:6:i<2097152?7:8:n<128?9:10}},{39:39}],39:[function(t,i,n){var r=n;function e(t,i,n){for(var r=Object.keys(i),e=0;e>>7|t.hi<<25)>>>0,t.hi>>>=7;for(;127>>7;i[n++]=t.lo}function 
d(t,i,n){i[n]=255&t,i[n+1]=t>>>8&255,i[n+2]=t>>>16&255,i[n+3]=t>>>24}a.create=r.Buffer?function(){return(a.create=function(){return new n})()}:function(){return new a},a.alloc=function(t){return new r.Array(t)},r.Array!==Array&&(a.alloc=r.pool(a.alloc,r.Array.prototype.subarray)),a.prototype.g=function(t,i,n){return this.tail=this.tail.next=new o(t,i,n),this.len+=i,this},(l.prototype=Object.create(o.prototype)).fn=function(t,i,n){for(;127>>=7;i[n]=t},a.prototype.uint32=function(t){return this.len+=(this.tail=this.tail.next=new l((t>>>=0)<128?1:t<16384?2:t<2097152?3:t<268435456?4:5,t)).len,this},a.prototype.int32=function(t){return t<0?this.g(v,10,e.fromNumber(t)):this.uint32(t)},a.prototype.sint32=function(t){return this.uint32((t<<1^t>>31)>>>0)},a.prototype.int64=a.prototype.uint64=function(t){var i=e.from(t);return this.g(v,i.length(),i)},a.prototype.sint64=function(t){var i=e.from(t).zzEncode();return this.g(v,i.length(),i)},a.prototype.bool=function(t){return this.g(c,1,t?1:0)},a.prototype.sfixed32=a.prototype.fixed32=function(t){return this.g(d,4,t>>>0)},a.prototype.sfixed64=a.prototype.fixed64=function(t){var i=e.from(t);return this.g(d,4,i.lo).g(d,4,i.hi)},a.prototype.float=function(t){return this.g(r.float.writeFloatLE,4,t)},a.prototype.double=function(t){return this.g(r.float.writeDoubleLE,8,t)};var p=r.Array.prototype.set?function(t,i,n){i.set(t,n)}:function(t,i,n){for(var r=0;r>>0;if(!i)return this.g(c,1,0);if(r.isString(t)){var n=a.alloc(i=s.length(t));s.decode(t,n,0),t=n}return this.uint32(i).g(p,i,t)},a.prototype.string=function(t){var i=u.length(t);return i?this.uint32(i).g(u.write,i,t):this.g(c,1,0)},a.prototype.fork=function(){return this.states=new h(this),this.head=this.tail=new o(f,0,0),this.len=0,this},a.prototype.reset=function(){return this.states?(this.head=this.states.head,this.tail=this.states.tail,this.len=this.states.len,this.states=this.states.next):(this.head=this.tail=new o(f,0,0),this.len=0),this},a.prototype.ldelim=function(){var 
t=this.head,i=this.tail,n=this.len;return this.reset().uint32(n),n&&(this.tail.next=t.next,this.tail=i,this.len+=n),this},a.prototype.finish=function(){for(var t=this.head.next,i=this.constructor.alloc(this.len),n=0;t;)t.fn(t.val,i,n),n+=t.len,t=t.next;return i},a.o=function(t){n=t}},{39:39}],43:[function(t,i){i.exports=s;var n=t(42);(s.prototype=Object.create(n.prototype)).constructor=s;var r=t(39),e=r.Buffer;function s(){n.call(this)}s.alloc=function(t){return(s.alloc=r.b)(t)};var u=e&&e.prototype instanceof Uint8Array&&"set"===e.prototype.set.name?function(t,i,n){i.set(t,n)}:function(t,i,n){if(t.copy)t.copy(i,n,0,t.length);else for(var r=0;r>>0;return this.uint32(i),i&&this.g(u,i,t),this},s.prototype.string=function(t){var i=e.byteLength(t);return this.uint32(i),i&&this.g(o,i,t),this}},{39:39,42:42}]},e={},t=[19],i=function t(i){var n=e[i];return n||r[i][0].call(n=e[i]={exports:{}},t,n,n.exports),n.exports}(t[0]),i.util.global.protobuf=i,"function"==typeof define&&define.amd&&define(["long"],function(t){return t&&t.isLong&&(i.util.Long=t,i.configure()),i}),"object"==typeof module&&module&&module.exports&&(module.exports=i)}(); +//# sourceMappingURL=protobuf.min.js.map diff --git a/frontend/packages/core/public/netron/deps/prototxt.js b/frontend/packages/core/public/netron/deps/prototxt.js new file mode 100644 index 00000000..e5992643 --- /dev/null +++ b/frontend/packages/core/public/netron/deps/prototxt.js @@ -0,0 +1,455 @@ + +var prototxt = prototxt || {} + +prototxt.TextReader = function(text) { + this.text = text; + this.position = 0; + this.lineEnd = -1; + this.lineStart = 0; + this.line = -1; + this.depth = 0; + this.array_depth = 0; + this.token = ""; + prototxt.TextReader.Array = typeof Uint8Array !== "undefined" ? 
Uint8Array : Array; +} + +prototxt.TextReader.create = function(text) { + return new prototxt.TextReader(text); +}; + +prototxt.TextReader.prototype.start = function() { + if (this.depth > 0) { + this.expect("{"); + } + this.depth++; +}; + +prototxt.TextReader.prototype.end = function() { + var token = this.peek(); + if (this.depth > 0 && token === "}") { + this.expect("}"); + this.match(";"); + this.depth--; + return true; + } + return token === ""; +}; + +prototxt.TextReader.prototype.tag = function() { + var name = this.read(); + var separator = this.peek(); + if (separator != "[" && separator != "{") { + this.expect(":"); + } + return name; +}; + +prototxt.TextReader.prototype.assert = function(tag) { + var token = this.tag(); + if (token != tag) { + throw new Error("Unexpected '" + token + "' instead of '" + tag + "'" + this.location()); + } +}; + +prototxt.TextReader.prototype.int32 = function() { + var token = this.read(); + var value = Number.parseInt(token, 10); + if (Number.isNaN(token - value)) { + throw new Error("Couldn't parse int '" + token + "'" + this.location()); + } + this.semicolon(); + return value; +}; + +prototxt.TextReader.prototype.uint32 = function() { + var token = this.read(); + var value = Number.parseInt(token, 10); + if (Number.isNaN(token - value)) { + throw new Error("Couldn't parse int '" + token + "'" + this.location()); + } + this.semicolon(); + return value; +}; + +prototxt.TextReader.prototype.int64 = function() { + var token = this.read(); + var value = Number.parseInt(token, 10); + if (Number.isNaN(token - value)) { + throw new Error("Couldn't parse int '" + token + "'" + this.location()); + } + this.semicolon(); + return value; +}; + +prototxt.TextReader.prototype.float = function() { + return this.double(); +}; + +prototxt.TextReader.prototype.double = function() { + var token = this.read(); + if (token.startsWith("nan")) { + return NaN; + } + if (token.startsWith("inf")) { + return Infinity; + } + if 
(token.startsWith("-inf")) { + return -Infinity; + } + if (token.endsWith("f")) { + token = token.substring(0, token.length - 1); + } + var value = Number.parseFloat(token); + if (Number.isNaN(token - value)) { + throw new Error("Couldn't parse float '" + token + "'" + this.location()); + } + this.semicolon(); + return value; +}; + +prototxt.TextReader.prototype.string = function() { + var token = this.read(); + if (token.length < 2) { + throw new Error("String is too short" + this.location()); + } + var quote = token[0]; + if (quote !== "'" && quote !== "\"") { + throw new Error("String is not in quotes" + this.location()); + } + if (quote !== token[token.length - 1]) { + throw new Error("String quotes do not match" + this.location()); + } + var value = token.substring(1, token.length - 1); + this.semicolon(); + return value; +}; + +prototxt.TextReader.prototype.bool = function() { + var token = this.read(); + switch (token) { + case "true": + case "True": + case "1": + this.semicolon(); + return true; + case "false": + case "False": + case "0": + this.semicolon(); + return false; + } + throw new Error("Couldn't parse boolean '" + token + "'" + this.location()); +}; + +prototxt.TextReader.prototype.bytes = function() { + var token = this.string(); + var i = 0; + var o = 0; + var length = token.length; + var a = new prototxt.TextReader.Array(length); + while (i < length) { + var c = token.charCodeAt(i++); + if (c !== 0x5C) { + a[o++] = c; + } + else { + if (i >= length) { + throw new Error("Unexpected end of bytes string" + this.location()); + } + c = token.charCodeAt(i++); + switch (c) { + case 0x27: a[o++] = 0x27; break; // ' + case 0x5C: a[o++] = 0x5C; break; // \\ + case 0x22: a[o++] = 0x22; break; // " + case 0x72: a[o++] = 0x0D; break; // \r + case 0x6E: a[o++] = 0x0A; break; // \n + case 0x74: a[o++] = 0x09; break; // \t + case 0x62: a[o++] = 0x08; break; // \b + case 0x58: // x + case 0x78: // X + for (var xi = 0; xi < 2; xi++) { + if (i >= length) { + 
throw new Error("Unexpected end of bytes string" + this.location()); + } + var xd = token.charCodeAt(i++); + xd = xd >= 65 && xd <= 70 ? xd - 55 : xd >= 97 && xd <= 102 ? xd - 87 : xd >= 48 && xd <= 57 ? xd - 48 : -1; + if (xd === -1) { + throw new Error("Unexpected hex digit '" + xd + "' in bytes string" + this.location()); + } + a[o] = a[o] << 4 | xd; + } + o++; + break; + default: + if (c < 48 || c > 57) { // 0-9 + throw new Error("Unexpected character '" + c + "' in bytes string" + this.location()); + } + i--; + for (var oi = 0; oi < 3; oi++) { + if (i >= length) { + throw new Error("Unexpected end of bytes string" + this.location()); + } + var od = token.charCodeAt(i++); + if (od < 48 || od > 57) { + throw new Error("Unexpected octal digit '" + od + "' in bytes string" + this.location()); + } + a[o] = a[o] << 3 | od - 48; + } + o++; + break; + } + } + } + return a.slice(0, o); +}; + +prototxt.TextReader.prototype.enum = function(type) { + var token = this.read(); + if (!Object.prototype.hasOwnProperty.call(type, token)) { + var value = Number.parseInt(token, 10); + if (!Number.isNaN(token - value)) { + this.semicolon(); + return value; + } + throw new Error("Couldn't parse enum '" + token + "'" + this.location()); + } + this.semicolon(); + return type[token]; +}; + +prototxt.TextReader.prototype.any = function(message) { + if (this.match("[")) { + this.read(); + var begin = this.position; + var end = this.text.indexOf("]", begin); + if (end === -1 || end >= this.next) { + throw new Error("End of Any type_url not found" + this.location()); + } + message.type_url = this.text.substring(begin, end); + this.position = end + 1; + message.value = this.skip().substring(1); + this.expect("}"); + this.match(";"); + return true; + } + return false; +}; + +prototxt.TextReader.prototype.first = function(c) { + if (this.match("[")) { + this.array_depth++; + return true; + } + return false; +}; + +prototxt.TextReader.prototype.last = function() { + if (this.match("]")) { + 
this.array_depth--; + return true; + } + return false; +}; + +prototxt.TextReader.prototype.next = function() { + var token = this.peek(); + if (token == ",") { + this.read(); + return; + } + if (token == "]") { + return; + } + this.handle(token); +} + +prototxt.TextReader.prototype.skip = function() { + var token = this.peek(); + if (token == "{") { + var message = this.position; + var depth = this.depth; + this.start(); + while (!this.end() || depth < this.depth) { + var token = this.peek(); + if (token === "{") { + this.start(); + } + else if (token !== "}") { + this.read(); + this.match(";"); + } + } + return this.text.substring(message, this.position); + } + else if (token == "[") { + var list = this.position; + this.read(); + while (!this.last()) { + token = this.read(); + if (token == "") { + this.handle(token); + } + } + return this.text.substring(list, this.position); + } + var position = this.position; + this.read(); + this.semicolon(); + return this.text.substring(position, this.position); +}; + +prototxt.TextReader.prototype.handle = function(token) { + throw new Error("Unexpected token '" + token + "'" + this.location()); +}; + +prototxt.TextReader.prototype.field = function(token, module) { + throw new Error("Unknown field '" + token + "'" + this.location()); +}; + +prototxt.TextReader.prototype.whitespace = function() { + for (;;) { + while (this.position >= this.lineEnd) { + this.lineStart = this.lineEnd + 1; + this.position = this.lineStart; + if (this.position >= this.text.length) { + return false; + } + this.lineEnd = this.text.indexOf("\n", this.position); + if (this.lineEnd === -1) { + this.lineEnd = this.text.length; + } + this.line++; + } + var c = this.text[this.position]; + switch (c) { + case " ": + case "\r": + case "\t": + this.position++; + break; + case "#": + this.position = this.lineEnd; + break; + default: + return true; + } + } +}; + +prototxt.TextReader.prototype.tokenize = function() { + if (!this.whitespace()) { + this.token = 
""; + return this.token; + } + var c = this.text[this.position]; + if (c === '[' && this.position + 2 < this.lineEnd) { + var i = this.position + 1; + var x = this.text[i]; + if (x >= "a" && x <= "z" || x >= "A" && x <= "Z") { + i++; + while (i < this.lineEnd) { + x = this.text[i]; + i++; + if (x >= "a" && x <= "z" || x >= "A" && x <= "Z" || x >= "0" && x <= "9" || x === "." || x === "/") { + continue; + } + if (x === ']') { + this.token = this.text.substring(this.position, i); + return this.token; + } + } + } + } + if (c === "{" || c === "}" || c === ":" || c === "[" || c === "," || c === "]" || c === ";") { + this.token = c; + return this.token; + } + var position = this.position + 1; + if (c >= "a" && c <= "z" || c >= "A" && c <= "Z" || c === "_" || c === "$") { + while (position < this.lineEnd) { + c = this.text[position]; + if (c >= "a" && c <= "z" || c >= "A" && c <= "Z" || c >= "0" && c <= "9" || c === "_" || c === "+" || c === "-") { + position++; + continue; + } + break; + } + this.token = this.text.substring(this.position, position); + return this.token; + } + if (c >= "0" && c <= "9" || c === "-" || c === "+" || c === ".") { + while (position < this.lineEnd) { + c = this.text[position]; + if (c >= "a" && c <= "z" || c >= "A" && c <= "Z" || c >= "0" && c <= "9" || c === "_" || c === "+" || c === "-" || c === ".") { + position++; + continue; + } + break; + } + this.token = this.text.substring(this.position, position); + return this.token; + } + if (c === "\"" || c === "'") { + var quote = c; + while (position < this.lineEnd) { + c = this.text[position]; + if (c === "\\" && position < this.lineEnd) { + position += 2; + continue; + } + position++; + if (c === quote) { + break; + } + } + this.token = this.text.substring(this.position, position); + return this.token; + } + throw new Error("Unexpected token '" + c + "'" + this.location()); +}; + +prototxt.TextReader.prototype.peek = function() { + if (!this.cache) { + this.token = this.tokenize(); + this.cache 
= true; + } + return this.token; +}; + +prototxt.TextReader.prototype.read = function() { + if (!this.cache) { + this.token = this.tokenize(); + } + this.position += this.token.length; + this.cache = false; + return this.token; +}; + +prototxt.TextReader.prototype.expect = function(value) { + var token = this.read(); + if (token !== value) { + throw new Error("Unexpected '" + token + "' instead of '" + value + "'" + this.location()); + } +}; + +prototxt.TextReader.prototype.match = function(value) { + if (this.peek() === value) { + this.read(); + return true; + } + return false; +}; + +prototxt.TextReader.prototype.semicolon = function() { + if (this.array_depth == 0) { + this.match(";"); + } +}; + +prototxt.TextReader.prototype.location = function() { + return " at " + (this.line + 1).toString() + ":" + (this.position - this.lineStart + 1).toString(); +}; + +if (typeof module !== "undefined" && typeof module.exports === "object") { + module.exports.TextReader = prototxt.TextReader; +} diff --git a/frontend/packages/core/public/netron/dl4j-metadata.json b/frontend/packages/core/public/netron/dl4j-metadata.json new file mode 100644 index 00000000..8a054c33 --- /dev/null +++ b/frontend/packages/core/public/netron/dl4j-metadata.json @@ -0,0 +1,128 @@ +[ + { + "name": "Dense", + "schema": { + "category": "Layer", + "attributes": [ + ] + } + }, + { + "name": "Output", + "schema": { + "category": "Layer", + "attributes": [ + ] + } + }, + { + "name": "Convolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "dilation" }, + { "name": "kernelSize" }, + { "name": "padding" } + ] + } + }, + { + "name": "SeparableConvolution2D", + "schema": { + "category": "Layer", + "attributes": [ + ] + } + }, + { + "name": "BatchNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "eps" }, + { "name": "gamma" }, + { "name": "decay" } + ] + } + }, + { + "name": "Sigmoid", + "schema": { + "category": "Activation", + "attributes": [ 
+ ] + } + }, + { + "name": "LReLU", + "schema": { + "category": "Activation", + "attributes": [ + ] + } + }, + { + "name": "ReLU", + "schema": { + "category": "Activation", + "attributes": [ + ] + } + }, + { + "name": "TanH", + "schema": { + "category": "Activation", + "attributes": [ + ] + } + }, + { + "name": "Softmax", + "schema": { + "category": "Activation", + "attributes": [ + ] + } + }, + { + "name": "Merge", + "schema": { + "category": "Tensor", + "attributes": [ + ] + } + }, + { + "name": "Upsampling2D", + "schema": { + "category": "Layer", + "attributes": [ + ] + } + }, + { + "name": "Dropout", + "schema": { + "category": "Dropout", + "attributes": [ + ] + } + }, + { + "name": "GlobalPooling", + "schema": { + "category": "Pool", + "attributes": [ + ] + } + }, + { + "name": "Subsampling", + "schema": { + "category": "Layer", + "attributes": [ + ] + } + } +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/dl4j.js b/frontend/packages/core/public/netron/dl4j.js new file mode 100644 index 00000000..6b83ef6a --- /dev/null +++ b/frontend/packages/core/public/netron/dl4j.js @@ -0,0 +1,615 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var dl4j = dl4j || {}; +var long = long || { Long: require('long') }; + +dl4j.ModelFactory = class { + + match(context) { + const identifier = context.identifier.toLowerCase(); + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'zip' && context.entries('zip').length > 0) { + if (dl4j.ModelFactory._openContainer(context)) { + return true; + } + } + return false; + } + + open(context, host) { + const identifier = context.identifier; + try { + const container = dl4j.ModelFactory._openContainer(context); + const configuration = JSON.parse(container.configuration); + return dl4j.Metadata.open(host).then((metadata) => { + try { + return new dl4j.Model(metadata, configuration, container.coefficients); + } + catch 
(error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new dl4j.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + return Promise.reject(new dl4j.Error(message.replace(/\.$/, '') + " in '" + identifier + "'.")); + } + } + + static _openContainer(context) { + const entries = context.entries('zip'); + const configurationEntries = entries.filter((entry) => entry.name === 'configuration.json'); + if (configurationEntries.length != 1) { + return null; + } + let configuration = null; + try { + configuration = new TextDecoder('utf-8').decode(configurationEntries[0].data); + } + catch (error) { + return null; + } + if (configuration.indexOf('"vertices"') === -1 && configuration.indexOf('"confs"') === -1) { + return null; + } + const coefficientsEntries = entries.filter((entry) => entry.name === 'coefficients.bin'); + if (coefficientsEntries.length > 1) { + return null; + } + const coefficients = coefficientsEntries.length == 1 ? 
coefficientsEntries[0].data : []; + return { + configuration: configuration, + coefficients: coefficients + }; + } +}; + +dl4j.Model = class { + + constructor(metadata, configuration, coefficients) { + this._graphs = []; + this._graphs.push(new dl4j.Graph(metadata, configuration, coefficients)); + } + + get format() { + return 'Deeplearning4j'; + } + + get graphs() { + return this._graphs; + } +}; + +dl4j.Graph = class { + + constructor(metadata, configuration, coefficients) { + + this._inputs = []; + this._outputs =[]; + this._nodes = []; + + const reader = new dl4j.NDArrayReader(coefficients); + const dataType = reader.dataType; + + if (configuration.networkInputs) { + for (const input of configuration.networkInputs) { + this._inputs.push(new dl4j.Parameter(input, true, [ + new dl4j.Argument(input, null, null) + ])); + } + } + + if (configuration.networkOutputs) { + for (const output of configuration.networkOutputs) { + this._outputs.push(new dl4j.Parameter(output, true, [ + new dl4j.Argument(output, null, null) + ])); + } + } + + let inputs = null; + + // Computation Graph + if (configuration.vertices) { + for (const name in configuration.vertices) { + const vertex = dl4j.Node._object(configuration.vertices[name]); + inputs = configuration.vertexInputs[name]; + let variables = []; + let layer = null; + + switch (vertex.__type__) { + case 'LayerVertex': + layer = dl4j.Node._object(vertex.layerConf.layer); + variables = vertex.layerConf.variables; + break; + case 'MergeVertex': + layer = { __type__: 'Merge', layerName: name }; + break; + case 'ElementWiseVertex': + layer = { __type__: 'ElementWise', layerName: name, op: vertex.op }; + break; + case 'PreprocessorVertex': + layer = { __type__: 'Preprocessor', layerName: name }; + break; + default: + throw new dl4j.Error("Unsupported vertex class '" + vertex['@class'] + "'."); + } + + this._nodes.push(new dl4j.Node(metadata, layer, inputs, dataType, variables)); + } + } + + // Multi Layer Network + if 
(configuration.confs) { + inputs = [ 'input' ]; + this._inputs.push(new dl4j.Parameter('input', true, [ + new dl4j.Argument('input', null, null) + ])); + for (const conf of configuration.confs) { + const layer = dl4j.Node._object(conf.layer); + this._nodes.push(new dl4j.Node(metadata, layer, inputs, dataType, conf.variables)); + inputs = [ layer.layerName ]; + } + this._outputs.push(new dl4j.Parameter('output', true, [ + new dl4j.Argument(inputs[0], null, null) + ])); + } + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +dl4j.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +dl4j.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new dl4j.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type; + this._initializer = initializer; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +dl4j.Node = class { + + constructor(metadata, layer, inputs, dataType, variables) { + + this._metadata = metadata; + this._type = layer.__type__; + this._name = layer.layerName || ''; + this._inputs = []; + this._outputs = []; + this._attributes = []; + + if (inputs && inputs.length > 0) { + const args = inputs.map((input) => new dl4j.Argument(input, null, null)); + this._inputs.push(new dl4j.Parameter(args.length < 2 ? 
'input' : 'inputs', true, args)); + } + + if (variables) { + for (const variable of variables) { + let tensor = null; + switch (this._type) { + case 'Convolution': + switch (variable) { + case 'W': + tensor = new dl4j.Tensor(dataType, layer.kernelSize.concat([ layer.nin, layer.nout ])); + break; + case 'b': + tensor = new dl4j.Tensor(dataType, [ layer.nout ]); + break; + default: + throw new dl4j.Error("Unknown '" + this._type + "' variable '" + variable + "'."); + } + break; + case 'SeparableConvolution2D': + switch (variable) { + case 'W': + tensor = new dl4j.Tensor(dataType, layer.kernelSize.concat([ layer.nin, layer.nout ])); + break; + case 'pW': + tensor = new dl4j.Tensor(dataType, [ layer.nout ]); + break; + default: + throw new dl4j.Error("Unknown '" + this._type + "' variable '" + variable + "'."); + } + break; + case 'Output': + case 'Dense': + switch (variable) { + case 'W': + tensor = new dl4j.Tensor(dataType, [ layer.nout, layer.nin ]); + break; + case 'b': + tensor = new dl4j.Tensor(dataType, [ layer.nout ]); + break; + default: + throw new dl4j.Error("Unknown '" + this._type + "' variable '" + variable + "'."); + } + break; + case 'BatchNormalization': + tensor = new dl4j.Tensor(dataType, [ layer.nin ]); + break; + default: + throw new dl4j.Error("Unknown '" + this._type + "' variable '" + variable + "'."); + } + this._inputs.push(new dl4j.Parameter(variable, true, [ + new dl4j.Argument(variable, null, tensor) + ])); + } + } + + if (this._name) { + this._outputs.push(new dl4j.Parameter('output', true, [ + new dl4j.Argument(this._name, null, null) + ])); + } + + let attributes = layer; + + if (layer.activationFn) { + let activation = dl4j.Node._object(layer.activationFn); + if (activation.__type__ !== 'ActivationIdentity' && activation.__type__ !== 'Identity') { + if (activation.__type__.startsWith('Activation')) { + activation.__type__ = activation.__type__.substring('Activation'.length); + } + if (this._type == 'Activation') { + this._type = 
activation.__type__; + attributes = activation; + } + else { + this._chain = this._chain || []; + this._chain.push(new dl4j.Node(metadata, activation, [], null, null)); + } + } + } + + for (const key in attributes) { + switch (key) { + case '__type__': + case 'constraints': + case 'layerName': + case 'activationFn': + case 'idropout': + case 'hasBias': + continue; + } + this._attributes.push(new dl4j.Attribute(metadata.attribute(this._type, key), key, attributes[key])); + } + + if (layer.idropout) { + const dropout = dl4j.Node._object(layer.idropout); + if (dropout.p !== 1.0) { + throw new dl4j.Error("Layer 'idropout' not implemented."); + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get chain() { + return this._chain; + } + + static _object(value) { + let result = {}; + if (value['@class']) { + result = value; + let type = value['@class'].split('.').pop(); + if (type.endsWith('Layer')) { + type = type.substring(0, type.length - 5); + } + delete value['@class']; + result.__type__ = type; + } + else { + let key = Object.keys(value)[0]; + result = value[key]; + if (key.length > 0) { + key = key[0].toUpperCase() + key.substring(1); + } + result.__type__ = key; + } + return result; + } +}; + +dl4j.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + this._visible = false; + if (schema) { + if (schema.visible) { + this._visible = true; + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible; + } +}; +dl4j.Tensor = class { + + constructor(dataType, shape) { + this._type = new dl4j.TensorType(dataType, new dl4j.TensorShape(shape)); + 
} + + get type() { + return this._type; + } + + get state() { + return 'Not implemented.'; + } +}; + +dl4j.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return (this.dataType || '?') + this._shape.toString(); + } +}; + +dl4j.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (this._dimensions) { + if (this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']'; + } + return ''; + } +}; + +dl4j.Metadata = class { + + static open(host) { + dl4j.Metadata.textDecoder = dl4j.Metadata.textDecoder || new TextDecoder('utf-8'); + if (dl4j.Metadata._metadata) { + return Promise.resolve(dl4j.Metadata._metadata); + } + return host.request(null, 'dl4j-metadata.json', 'utf-8').then((data) => { + dl4j.Metadata._metadata = new dl4j.Metadata(data); + return dl4j.Metadata._metadata; + }).catch(() => { + dl4j.Metadata._metadata = new dl4j.Metadata(null); + return dl4j.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name]; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +dl4j.NDArrayReader = class { + + constructor(buffer) { 
+ let reader = new dl4j.BinaryReader(buffer); + /* let shape = */ dl4j.NDArrayReader._header(reader); + let data = dl4j.NDArrayReader._header(reader); + this._dataType = data.type; + } + + get dataType() { + return this._dataType; + } + + static _header(reader) { + let header = {}; + header.alloc = reader.string(); + header.length = 0; + switch (header.alloc) { + case 'DIRECT': + case 'HEAP': + case 'JAVACPP': + header.length = reader.int32(); + break; + case 'LONG_SHAPE': + case 'MIXED_DATA_TYPES': + header.length = reader.int64(); + break; + } + header.type = reader.string(); + switch (header.type) { + case 'INT': + header.type = 'int32'; + header.itemsize = 4; + break; + case 'FLOAT': + header.type = 'float32'; + header.itemsize = 4; + break; + } + header.data = reader.bytes(header.itemsize * header.length); + return header; + } +}; + +dl4j.BinaryReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._position = 0; + } + + bytes(size) { + let data = this._buffer.subarray(this._position, this._position + size); + this._position += size; + return data; + } + + string() { + let size = this._buffer[this._position++] << 8 | this._buffer[this._position++]; + let buffer = this.bytes(size); + return new TextDecoder('ascii').decode(buffer); + } + + int32() { + return this._buffer[this._position++] << 24 | + this._buffer[this._position++] << 16 | + this._buffer[this._position++] << 8 | + this._buffer[this._position++]; + } + + int64() { + let hi = this.int32(); + let lo = this.int32(); + return new long.Long(hi, lo, true).toNumber(); + } +}; + +dl4j.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading Deeplearning4j model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = dl4j.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/flux-metadata.json 
b/frontend/packages/core/public/netron/flux-metadata.json new file mode 100644 index 00000000..32960f8c --- /dev/null +++ b/frontend/packages/core/public/netron/flux-metadata.json @@ -0,0 +1,2 @@ +[ +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/flux.js b/frontend/packages/core/public/netron/flux.js new file mode 100644 index 00000000..9afe18dd --- /dev/null +++ b/frontend/packages/core/public/netron/flux.js @@ -0,0 +1,147 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var flux = flux || {}; + +flux.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'bson') { + return true; + } + return false; + } + + open(context, host) { + return host.require('./bson').then((bson) => { + let model = null; + const identifier = context.identifier; + try { + const reader = new bson.Reader(context.buffer); + const root = reader.read(); + const obj = flux.ModelFactory._backref(root, root); + model = obj.model; + if (!model) { + throw new flux.Error('File does not contain Flux model.'); + } + } + catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new flux.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + return flux.Metadata.open(host).then((metadata) => { + try { + return new flux.Model(metadata, model); + } + catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new flux.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } + + static _backref(obj, root) { + if (Array.isArray(obj)) { + for (let i = 0; i < obj.length; i++) { + obj[i] = flux.ModelFactory._backref(obj[i], root); + } + } + else if (obj === Object(obj)) { + if (obj.tag == 'backref' && obj.ref) { + if (!root._backrefs[obj.ref - 1]) { + throw new flux.Error("Invalid backref '" + obj.ref + "'."); + } + obj = root._backrefs[obj.ref - 1]; + } + for (const key of Object.keys(obj)) { + if (obj !== root || key !== '_backrefs') { + obj[key] = flux.ModelFactory._backref(obj[key], root); + } + } + } + return obj; + } +}; + +flux.Model = class { + + constructor(/* root */) { + this._format = 'Flux'; + this._graphs = []; + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +flux.Metadata = class { + + static open(host) { + if (flux.Metadata._metadata) { + return Promise.resolve(flux.Metadata._metadata); + } + return host.request(null, 'flux-metadata.json', 'utf-8').then((data) => { + flux.Metadata._metadata = new flux.Metadata(data); + return flux.Metadata._metadata; + }).catch(() => { + flux.Metadata._metadata = new flux.Metadata(null); + return flux.Metadata._metadatas; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + this._map[item.name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + 
+flux.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Flux Error'; + } +}; + +if (module && module.exports) { + module.exports.ModelFactory = flux.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/gzip.js b/frontend/packages/core/public/netron/gzip.js new file mode 100644 index 00000000..64985758 --- /dev/null +++ b/frontend/packages/core/public/netron/gzip.js @@ -0,0 +1,168 @@ +/* jshint esversion: 6 */ +/* global pako */ + +var gzip = gzip || {}; + +gzip.Archive = class { + + constructor(buffer) { + this._entries = []; + if (buffer.length < 18 || buffer[0] != 0x1f || buffer[1] != 0x8b) { + throw new gzip.Error('Invalid GZIP archive.'); + } + const reader = new gzip.Reader(buffer, 0, buffer.length); + this._entries.push(new gzip.Entry(reader)); + } + + get entries() { + return this._entries; + } +}; + +gzip.Entry = class { + + constructor(reader) { + if (!reader.match([ 0x1f, 0x8b ])) { + throw new gzip.Error('Invalid GZIP signature.'); + } + const compressionMethod = reader.byte(); + if (compressionMethod != 8) { + throw new gzip.Error("Invalid compression method '" + compressionMethod.toString() + "'."); + } + const flags = reader.byte(); + reader.uint32(); // MTIME + reader.byte(); + reader.byte(); // OS + if ((flags & 4) != 0) { + const xlen = reader.uint16(); + reader.skip(xlen); + } + if ((flags & 8) != 0) { + this._name = reader.string(); + } + if ((flags & 16) != 0) { // FLG.FCOMMENT + reader.string(); + } + if ((flags & 1) != 0) { + reader.uint16(); // CRC16 + } + const compressedData = reader.bytes(); + if (typeof process === 'object' && typeof process.versions == 'object' && typeof process.versions.node !== 'undefined') { + this._data = require('zlib').inflateRawSync(compressedData); + } + else if (typeof pako !== 'undefined') { + this._data = pako.inflateRaw(compressedData); + } + else { + this._data = new require('./zip').Inflater().inflateRaw(compressedData); + } + 
reader.position = -8; + reader.uint32(); // CRC32 + const size = reader.uint32(); + if (size != this._data.length) { + throw new gzip.Error('Invalid size.'); + } + } + + get name() { + return this._name; + } + + get data() { + return this._data; + } + +}; + +gzip.Reader = class { + + constructor(buffer, start, end) { + this._buffer = buffer; + this._position = start; + this._end = end; + } + + match(signature) { + if (this._position + signature.length <= this._end) { + for (let i = 0; i < signature.length; i++) { + if (this._buffer[this._position + i] != signature[i]) { + return false; + } + } + } + this._position += signature.length; + return true; + } + + get position() { + return this._position; + } + + set position(value) { + this._position = value >= 0 ? value : this._end + value; + } + + skip(size) { + if (this._position + size > this._end) { + throw new gzip.Error('Data not available.'); + } + this._position += size; + } + + bytes(size) { + if (this._position + size > this._end) { + throw new gzip.Error('Data not available.'); + } + size = size === undefined ? 
this._end : size; + const data = this._buffer.subarray(this._position, this._position + size); + this._position += size; + return data; + } + + byte() { + if (this._position + 1 > this._end) { + throw new gzip.Error('Data not available.'); + } + const value = this._buffer[this._position]; + this._position++; + return value; + } + + uint16() { + if (this._position + 2 > this._end) { + throw new gzip.Error('Data not available.'); + } + const value = this._buffer[this._position] | (this._buffer[this._position + 1] << 8); + this._position += 2; + return value; + } + + uint32() { + return this.uint16() | (this.uint16() << 16); + } + + string() { + let result = ''; + const end = this._buffer.indexOf(0x00, this._position); + if (end < 0) { + throw new gzip.Error('End of string not found.'); + } + while (this._position < end) { + result += String.fromCharCode(this._buffer[this._position++]); + } + this._position++; + return result; + } + +}; + +gzip.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'gzip Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Archive = gzip.Archive; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/hdf5.js b/frontend/packages/core/public/netron/hdf5.js new file mode 100755 index 00000000..50fe953e --- /dev/null +++ b/frontend/packages/core/public/netron/hdf5.js @@ -0,0 +1,1431 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental HDF5 JavaScript reader + +var hdf5 = hdf5 || {}; +var long = long || { Long: require('long') }; +var zip = zip || require('./zip'); + +hdf5.File = class { + + constructor(buffer) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html + const reader = new hdf5.Reader(buffer, 0); + this._globalHeap = new hdf5.GlobalHeap(reader); + if (!reader.match('\x89HDF\r\n\x1A\n')) { + throw new hdf5.Error('Not a valid HDF5 file.'); + } + const 
var hdf5 = hdf5 || {};

hdf5.File = class {

    // Parses the HDF5 superblock (versions 0-3) and exposes the root group.
    // https://support.hdfgroup.org/HDF5/doc/H5.format.html
    constructor(buffer) {
        const reader = new hdf5.Reader(buffer, 0);
        this._globalHeap = new hdf5.GlobalHeap(reader);
        if (!reader.match('\x89HDF\r\n\x1A\n')) {
            throw new hdf5.Error('Not a valid HDF5 file.');
        }
        const version = reader.byte();
        switch (version) {
            case 0:
            case 1: {
                this._freeSpaceStorageVersion = reader.byte();
                this._rootGroupEntryVersion = reader.byte();
                reader.skip(1);
                this._sharedHeaderMessageVersionFormat = reader.byte();
                reader.initialize(); // reads 'size of offsets' and 'size of lengths'
                reader.skip(1);
                this._groupLeafNodeK = reader.uint16(); // 0x04?
                this._groupInternalNodeK = reader.uint16(); // 0x10?
                reader.skip(4);
                if (version > 0) {
                    this._indexedStorageInternalNodeK = reader.uint16();
                    reader.skip(2); // Reserved — was 'this.seek(2)', a TypeError: hdf5.File has no seek()
                }
                this._baseAddress = reader.offset();
                reader.offset(); // Address of File Free space Info
                this._endOfFileAddress = reader.offset();
                reader.offset(); // Driver Information Block Address
                if (this._baseAddress != 0) {
                    throw new hdf5.Error('Base address is not zero.');
                }
                const rootGroupEntry = new hdf5.SymbolTableEntry(reader);
                this._rootGroup = new hdf5.Group(reader, rootGroupEntry, null, this._globalHeap, '', '');
                break;
            }
            case 2:
            case 3: {
                reader.initialize();
                reader.byte(); // file consistency flags
                this._baseAddress = reader.offset();
                this._superBlockExtensionAddress = reader.offset();
                this._endOfFileAddress = reader.offset();
                const rootGroupObjectHeader = new hdf5.DataObjectHeader(reader.at(reader.offset()));
                this._rootGroup = new hdf5.Group(reader, null, rootGroupObjectHeader, this._globalHeap, '', '');
                break;
            }
            default:
                throw new hdf5.Error('Unsupported Superblock version ' + version + '.');
        }
    }

    get rootGroup() {
        return this._rootGroup;
    }
};
(parentPath + name) : (parentPath + '/' + name); + } + + get name() { + return this._name; + } + + get path() { + return this._path; + } + + group(path) { + this._decodeGroups(); + const index = path.indexOf('/'); + if (index != -1) { + const childPath = path.substring(index + 1); + const subPath = path.substring(0, index); + const subGroup = this.group(subPath); + if (subGroup != null) { + return subGroup.group(childPath); + } + } + else { + const group = this._groupMap[path]; + if (group) { + return group; + } + } + return null; + } + + get groups() { + this._decodeGroups(); + return this._groups; + } + + attribute(name) { + this._decodeDataObject(); + return this._attributes[name]; + } + + get attributes() { + this._decodeDataObject(); + return this._attributes; + } + + get value() { + this._decodeDataObject(); + return this._value; + } + + _decodeDataObject() { + if (!this._dataObjectHeader) { + this._dataObjectHeader = new hdf5.DataObjectHeader(this._reader.at(this._entry.objectHeaderAddress)); + } + if (!this._attributes) { + this._attributes = {}; + for (const attribute of this._dataObjectHeader.attributes) { + const name = attribute.name; + const value = attribute.decodeValue(this._globalHeap); + this._attributes[name] = value; + } + this._value = null; + const datatype = this._dataObjectHeader.datatype; + const dataspace = this._dataObjectHeader.dataspace; + const dataLayout = this._dataObjectHeader.dataLayout; + const filterPipeline = this._dataObjectHeader.filterPipeline; + if (datatype && dataspace && dataLayout) { + this._value = new hdf5.Variable(this._reader, this._globalHeap, datatype, dataspace, dataLayout, filterPipeline); + } + } + } + + _decodeGroups() { + if (!this._groups) { + this._groupMap = {}; + this._groups = []; + if (this._entry) { + if (this._entry.treeAddress || this._entry.heapAddress) { + const heap = new hdf5.Heap(this._reader.at(this._entry.heapAddress)); + const tree = new hdf5.Tree(this._reader.at(this._entry.treeAddress)); + 
for (const node of tree.nodes) { + for (const entry of node.entries) { + const name = heap.getString(entry.linkNameOffset); + const group = new hdf5.Group(this._reader, entry, null, this._globalHeap, this._path, name); + this._groups.push(group); + this._groupMap[name] = group; + } + } + } + } + else { + this._decodeDataObject(); + for (const link of this._dataObjectHeader.links) { + if (Object.prototype.hasOwnProperty.call(link, 'objectHeaderAddress')) { + const name = link.name; + const objectHeader = new hdf5.DataObjectHeader(this._reader.at(link.objectHeaderAddress)); + const linkGroup = new hdf5.Group(this._reader, null, objectHeader, this._globalHeap, this._path, name); + this._groups.push(linkGroup); + this._groupMap[name] = linkGroup; + } + } + } + } + } +}; + +hdf5.Variable = class { + + constructor(reader, globalHeap, datatype, dataspace, dataLayout, filterPipeline) { + this._reader = reader; + this._globalHeap = globalHeap; + this._datatype = datatype; + this._dataspace = dataspace; + this._dataLayout = dataLayout; + this._filterPipeline = filterPipeline; + } + + get type () { + return this._datatype.type; + } + + get littleEndian() { + return this._datatype.littleEndian; + } + + get shape() { + return this._dataspace.shape; + } + + get value() { + const data = this.data; + if (data) { + const reader = new hdf5.Reader(data); + const array = this._dataspace.read(this._datatype, reader); + return this._dataspace.decode(this._datatype, array, array, this._globalHeap); + } + return null; + } + + get data() { + switch (this._dataLayout.layoutClass) { + case 1: // Contiguous + if (this._dataLayout.address) { + return this._reader.at(this._dataLayout.address).bytes(this._dataLayout.size); + } + break; + case 2: { // Chunked + const tree = new hdf5.Tree(this._reader.at(this._dataLayout.address), this._dataLayout.dimensionality); + if (this._dataLayout.dimensionality == 2 && this._dataspace.shape.length == 1) { + let size = this._dataLayout.datasetElementSize; + 
for (let i = 0; i < this._dataspace.shape.length; i++) { + size *= this._dataspace.shape[i]; + } + const data = new Uint8Array(size); + for (const node of tree.nodes) { + if (node.fields.length !== 2 || node.fields[1] !== 0) { + return null; + } + if (node.filterMask !== 0) { + return null; + } + const start = node.fields[0] * this._dataLayout.datasetElementSize; + let chunk = node.data; + if (this._filterPipeline) { + for (const filter of this._filterPipeline.filters) { + chunk = filter.decode(chunk); + } + } + for (let i = 0; i < chunk.length; i++) { + data[start + i] = chunk[i]; + } + } + return data; + } + break; + } + default: { + throw new hdf5.Error("Unknown data layout class '" + this.layoutClass + "'."); + } + } + return null; + } +}; + +hdf5.Reader = class { + + constructor(buffer) { + if (buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + this._offset = 0; + } + } + + initialize() { + this._offsetSize = this.byte(); + this._lengthSize = this.byte(); + } + + skip(offset) { + this._offset += offset; + if (this._position + this._offset > this._buffer.length) { + throw new hdf5.Error('Expected ' + (this._position + this._offset - this._buffer.length) + ' more bytes. The file might be corrupted. 
Unexpected end of file.'); + } + } + + int8() { + const offset = this._offset; + this.skip(1); + return this._dataView.getInt8(this._position + offset); + } + + byte() { + const offset = this._offset; + this.skip(1); + return this._dataView.getUint8(this._position + offset); + } + + bytes(length) { + const offset = this._offset; + this.skip(length); + return this._buffer.subarray(this._position + offset, this._position + this._offset); + } + + int16() { + const offset = this._offset; + this.skip(2); + return this._dataView.getInt16(this._position + offset, true); + } + + uint16() { + const offset = this._offset; + this.skip(2); + return this._dataView.getUint16(this._position + offset, true); + } + + int32() { + const offset = this._offset; + this.skip(4); + return this._dataView.getInt32(this._position + offset, true); + } + + uint32() { + const offset = this._offset; + this.skip(4); + return this._dataView.getUint32(this._position + offset, true); + } + + int64() { + const offset = this._offset; + this.skip(8); + const lo = this._dataView.getUint32(this._position + offset, true); + const hi = this._dataView.getUint32(this._position + offset + 4, true); + return new long.Long(lo, hi, false).toNumber(); + } + + uint64() { + const offset = this._offset; + this.skip(8); + const lo = this._dataView.getUint32(this._position + offset, true); + const hi = this._dataView.getUint32(this._position + offset + 4, true); + return new long.Long(lo, hi, true).toNumber(); + } + + uint(type) { + switch (type) { + case 0: return this.byte(); + case 1: return this.uint16(); + case 2: return this.uint32(); + case 3: return this.uint64(); + } + } + + float16() { + const offset = this._offset; + this.skip(2); + const value = this._dataView.getUint16(this._position + offset, true); + // decode float16 value + const s = (value & 0x8000) >> 15; + const e = (value & 0x7C00) >> 10; + const f = value & 0x03FF; + if(e == 0) { + return (s ? 
-1 : 1) * Math.pow(2, -14) * (f / Math.pow(2, 10)); + } + else if (e == 0x1F) { + return f ? NaN : ((s ? -1 : 1) * Infinity); + } + return (s ? -1 : 1) * Math.pow(2, e-15) * (1 + (f / Math.pow(2, 10))); + } + + float32() { + const offset = this._offset; + this.skip(4); + return this._dataView.getFloat32(this._position + offset, true); + } + + float64() { + const offset = this._offset; + this.skip(8); + return this._dataView.getFloat64(this._position + offset, true); + } + + string(size, encoding) { + if (!size || size == -1) { + let position = this._position + this._offset; + while (this._buffer[position] != 0) { + position++; + } + size = position - this._position + this._offset + 1; + } + const data = this.bytes(size); + return hdf5.Reader.decode(data, encoding); + } + + static decode(data, encoding) { + let text = ''; + if (encoding == 'utf-8') { + if (!hdf5.Reader._utf8Decoder) { + hdf5.Reader._utf8Decoder = new TextDecoder('utf-8'); + } + text = hdf5.Reader._utf8Decoder.decode(data); + } + else { + if (!hdf5.Reader._asciiDecoder) { + hdf5.Reader._asciiDecoder = new TextDecoder('ascii'); + } + text = hdf5.Reader._asciiDecoder.decode(data); + } + return text.replace(/\0/g, ''); + } + + offset() { + switch (this._offsetSize) { + case 8: { + const lo = this.uint32(); + const hi = this.uint32(); + if (lo === 0xffffffff && hi === 0xffffffff) { + return undefined; + } + return new long.Long(lo, hi, true).toNumber(); + } + case 4: { + const value = this.uint32(); + if (value === 0xffffffff) { + return undefined; + } + return value; + } + } + throw new hdf5.Error('Unsupported offset size \'' + this._offsetSize + '\'.'); + } + + length() { + switch (this._lengthSize) { + case 8: { + const lo = this.uint32(); + const hi = this.uint32(); + if (lo === 0xffffffff && hi === 0xffffffff) { + return undefined; + } + return new long.Long(lo, hi, true).toNumber(); + } + case 4: { + const value = this.uint32(); + if (value === 0xffffffff) { + return undefined; + } + return value; 
+ } + } + throw new hdf5.Error('Unsupported length size \'' + this._lengthSize + '\'.'); + } + + at(position) { + const reader = new hdf5.Reader(null); + reader._buffer = this._buffer; + reader._dataView = this._dataView; + reader._position = position; + reader._offset = 0; + reader._offsetSize = this._offsetSize; + reader._lengthSize = this._lengthSize; + return reader; + } + + clone() { + const reader = new hdf5.Reader(this._buffer, this._position); + reader._buffer = this._buffer; + reader._dataView = this._dataView; + reader._position = this._position; + reader._offset = this._offset; + reader._offsetSize = this._offsetSize; + reader._lengthSize = this._lengthSize; + return reader; + } + + align(mod) { + if (this._offset % mod != 0) { + this._offset = (Math.floor(this._offset / mod) + 1) * mod; + } + } + + match(text) { + if (this._position + this._offset + text.length > this._buffer.length) { + return false; + } + const offset = this._offset; + const buffer = this.bytes(text.length); + for (let i = 0; i < text.length; i++) { + if (text.charCodeAt(i) != buffer[i]) { + this._offset = offset; + return false; + } + } + return true; + } + + get position() { + return this._position + this._offset; + } + + get size() { + return this._buffer.length; + } +}; + +hdf5.SymbolTableNode = class { + + constructor(reader) { + if (!reader.match('SNOD')) { + throw new hdf5.Error("Not a valid 'SNOD' block."); + } + const version = reader.byte(); + if (version == 1) { + reader.skip(1); + const entriesUsed = reader.uint16(); + this.entries = []; + for (let i = 0; i < entriesUsed; i++) { + this.entries.push(new hdf5.SymbolTableEntry(reader)); + } + } + else { + throw new hdf5.Error('Unsupported symbol table node version \'' + version + '\'.'); + } + } +}; + +hdf5.SymbolTableEntry = class { + + constructor(reader) { + this.linkNameOffset = reader.offset(); + this.objectHeaderAddress = reader.offset(); + const cacheType = reader.uint32(); + reader.skip(4); // Reserved + switch 
(cacheType) { + case 0: + break; + case 1: { + const scratchReader = reader.clone(); + this.treeAddress = scratchReader.offset(); + this.heapAddress = scratchReader.offset(); + break; + } + default: + throw new hdf5.Error('Unsupported cache type \'' + cacheType + '\'.'); + } + reader.skip(16); // Scratch-pad space + } +}; + +hdf5.DataObjectHeader = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#ObjectHeader + this.attributes = []; + this.links = []; + this.continuations = []; + const version = reader.match('OHDR') ? reader.byte() : reader.byte(); + switch (version) { + case 1: { + reader.skip(1); + const messageCount = reader.uint16(); + reader.uint32(); + const objectHeaderSize = reader.uint32(); + reader.align(8); + let end = reader.position + objectHeaderSize; + for (let i = 0; i < messageCount; i++) { + const messageType = reader.uint16(); + const messageSize = reader.uint16(); + const messageFlags = reader.byte(); + reader.skip(3); + reader.align(8); + const next = this._readMessage(reader, messageType, messageSize, messageFlags); + if ((!next || reader.position >= end) && this.continuations.length > 0) { + const continuation = this.continuations.shift(); + reader = reader.at(continuation.offset); + end = continuation.offset + continuation.length; + } + else { + reader.align(8); + } + } + break; + } + case 2: { + const flags = reader.byte(); + if ((flags & 0x20) != 0) { + reader.uint32(); + reader.uint32(); + reader.uint32(); + reader.uint32(); + } + if ((flags & 0x10) != 0) { + reader.uint16(); + reader.uint16(); + } + const size = reader.uint(flags & 0x03); + let next = true; + let end = reader.position + size; + while (next && reader.position < end) { + const messageType = reader.byte(); + const messageSize = reader.uint16(); + const messageFlags = reader.byte(); + if (reader.position < end) { + if ((flags & 0x04) != 0) { + reader.uint16(); + } + next = this._readMessage(reader, messageType, messageSize, 
messageFlags); + } + if ((!next || reader.position >= end) && this.continuations.length > 0) { + const continuation = this.continuations.shift(); + reader = reader.at(continuation.offset); + end = continuation.offset + continuation.length; + if (!reader.match('OCHK')) { + throw new hdf5.Error("Invalid continuation block signature."); + } + next = true; + } + } + break; + } + default: { + throw new hdf5.Error('Unsupported data object header version \'' + version + '\'.'); + } + } + } + + _readMessage(reader, type, size, flags) { + switch(type) { + case 0x0000: // NIL + return false; + case 0x0001: // Dataspace + this.dataspace = (size != 4 || flags != 1) ? new hdf5.Dataspace(reader.clone()) : null; + break; + case 0x0002: // Link Info + this.linkInfo = new hdf5.LinkInfo(reader.clone()); + break; + case 0x0003: // Datatype + this.datatype = new hdf5.Datatype(reader.clone()); + break; + case 0x0004: + case 0x0005: // Fill Value + this.fillValue = new hdf5.FillValue(reader.clone(), type); + break; + case 0x0006: // Link + this.links.push(new hdf5.Link(reader.clone())); + break; + case 0x0008: // Data Layout + this.dataLayout = new hdf5.DataLayout(reader.clone()); + break; + case 0x000A: // Group Info + this.groupInfo = new hdf5.GroupInfo(reader.clone()); + break; + case 0x000B: // Filter Pipeline + this.filterPipeline = new hdf5.FilterPipeline(reader.clone()); + break; + case 0x000C: // Attribute + this.attributes.push(new hdf5.Attribute(reader.clone())); + break; + case 0x000D: // Object Comment Message + this.comment = reader.string(-1, 'ascii'); + break; + case 0x0010: // Object Header Continuation + this.continuations.push(new hdf5.ObjectHeaderContinuation(reader.clone())); + break; + case 0x0011: // Symbol Table + this.symbolTable = new hdf5.SymbolTable(reader.clone()); + break; + case 0x000E: // Object Modification Time (Old) + case 0x0012: // Object Modification Time + this.objectModificationTime = new hdf5.ObjectModificationTime(reader.clone(), type); + break; 
+ case 0x0015: // Attribute Info + this.attributeInfo = new hdf5.AttributeInfo(reader.clone()); + break; + default: + throw new hdf5.Error('Unsupported message type \'' + type + '\'.'); + } + reader.skip(size); + return true; + } +}; + +hdf5.Message = class { + + constructor(type, data, flags) { + this._type = type; + this._data = data; + this._flags = flags; + } +}; + +hdf5.Dataspace = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#DataspaceMessage + this._sizes = []; + const version = reader.byte(); + switch (version) { + case 1: + this._dimensions = reader.byte(); + this._flags = reader.byte(); + reader.skip(1); + reader.skip(4); + for (let i = 0; i < this._dimensions; i++) { + this._sizes.push(reader.length()); + } + if ((this._flags & 0x01) != 0) { + this._maxSizes = []; + for (let j = 0; j < this._dimensions; j++) { + this._maxSizes.push(reader.length()); + if (this._maxSizes[j] != this._sizes[j]) { + throw new hdf5.Error('Max size is not supported.'); + } + } + } + if ((this._flags & 0x02) != 0) { + throw new hdf5.Error('Permutation indices not supported.'); + } + break; + case 2: + this._dimensions = reader.byte(); + this._flags = reader.byte(); + this._type = reader.byte(); // 0 scalar, 1 simple, 2 null + for (let k = 0; k < this._dimensions; k++) { + this._sizes.push(reader.length()); + } + if ((this._flags & 0x01) != 0) { + this._maxSizes = []; + for (let l = 0; l < this._dimensions; l++) { + this._maxSizes.push(reader.length()); + } + } + break; + default: + throw new hdf5.Error("Unsupported dataspace message version '" + version + "'."); + + } + } + + get shape() { + return this._sizes; + } + + read(datatype, reader) { + if (this._dimensions == 0) { + return datatype.read(reader); + } + return this._readArray(datatype, reader, this._sizes, 0); + } + + _readArray(datatype, reader, shape, dimension) { + const array = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; 
i < size; i++) { + array.push(datatype.read(reader)); + } + } + else { + for (let j = 0; j < size; j++) { + array.push(this._readArray(datatype, reader, shape, dimension + 1)); + } + } + return array; + } + + decode(datatype, data, globalHeap) { + if (this._dimensions == 0) { + return datatype.decode(data, globalHeap); + } + return this._decodeArray(datatype, data, globalHeap, this._sizes, 0); + } + + _decodeArray(datatype, data, globalHeap, shape, dimension) { + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + data[i] = datatype.decode(data[i], globalHeap); + } + } + else { + for (let j = 0; j < size; j++) { + data[j] = this._decodeArray(datatype, data[j], shape, dimension + 1); + } + } + return data; + } +}; + +hdf5.LinkInfo = class { + + constructor(reader) { + const version = reader.byte(); + switch (version) { + case 0: { + const flags = reader.byte(); + if ((flags & 1) != 0) { + this.maxCreationIndex = reader.uint64(); + } + this.fractalHeapAddress = reader.offset(); + this.nameIndexTreeAddress = reader.offset(); + if ((flags & 2) != 0) { + this.creationOrderIndexTreeAddress = reader.offset(); + } + break; + } + default: + throw new hdf5.Error("Unsupported link info message version '" + version + "'."); + } + } +}; + +hdf5.Datatype = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#DatatypeMessage + const format = reader.byte(); + const version = format >> 4; + this._class = format & 0xf; + switch (version) { + case 1: + case 2: + this._flags = reader.byte() | reader.byte() << 8 | reader.byte() << 16; + this._size = reader.uint32(); + break; + default: + throw new hdf5.Error('Unsupported datatype version \'' + version + '\'.'); + } + } + + get type() { + switch (this._class) { + case 0: // fixed-point + if ((this._flags & 0xfff6) === 0) { + if ((this._flags && 0x08) !== 0) { + switch (this._size) { + case 1: return 'int8'; + case 2: return 'int16'; + case 4: 
return 'int32'; + case 8: return 'int64'; + } + } + else { + switch (this._size) { + case 1: return 'uint8'; + case 2: return 'uint16'; + case 4: return 'uint32'; + case 8: return 'uint64'; + } + } + } + break; + case 1: // floating-point + if (this._size == 2 && this._flags == 0x0f20) { + return 'float16'; + } + else if (this._size == 4 && this._flags == 0x1f20) { + return 'float32'; + } + else if (this._size == 8 && this._flags == 0x3f20) { + return 'float64'; + } + break; + case 3: // string + return 'string'; + case 5: // opaque + return 'uint8[]'; + case 9: // variable-length + if ((this._flags & 0x0f) == 1) { // type + return 'char[]'; + } + break; + } + throw new hdf5.Error('Unsupported datatype class \'' + this._class + '\'.'); + } + + get littleEndian() { + switch (this._class) { + case 0: // fixed-point + case 1: // floating-point + return (this.flags & 0x01) == 0; + } + return true; + } + + read(reader) { + switch (this._class) { + case 0: // fixed-point + if (this._size == 1) { + return ((this._flags & 0x8) != 0) ? reader.int8() : reader.byte(); + } + else if (this._size == 2) { + return ((this._flags & 0x8) != 0) ? reader.int16() : reader.uint16(); + } + else if (this._size == 4) { + return ((this._flags & 0x8) != 0) ? reader.int32() : reader.uint32(); + } + else if (this._size == 8) { + return ((this._flags & 0x8) != 0) ? 
reader.int64() : reader.uint64(); + } + throw new hdf5.Error('Unsupported fixed-point datatype.'); + case 1: // floating-point + if (this._size == 2 && this._flags == 0x0f20) { + return reader.float16(); + } + else if (this._size == 4 && this._flags == 0x1f20) { + return reader.float32(); + } + else if (this._size == 8 && this._flags == 0x3f20) { + return reader.float64(); + } + throw new hdf5.Error('Unsupported floating-point datatype.'); + case 3: // string + switch ((this._flags >> 8) & 0x0f) { // character set + case 0: + return hdf5.Reader.decode(reader.bytes(this._size), 'ascii'); + case 1: + return hdf5.Reader.decode(reader.bytes(this._size), 'utf-8'); + } + throw new hdf5.Error('Unsupported character encoding.'); + case 5: // opaque + return reader.bytes(this._size); + case 9: // variable-length + return { + length: reader.uint32(), + globalHeapID: new hdf5.GlobalHeapID(reader) + }; + } + throw new hdf5.Error('Unsupported datatype class \'' + this._class + '\'.'); + } + + decode(data, globalHeap) { + switch (this._class) { + case 0: // fixed-point + return data; + case 1: // floating-point + return data; + case 3: // string + return data; + case 5: // opaque + return data; + case 9: { // variable-length + const globalHeapObject = globalHeap.get(data.globalHeapID); + if (globalHeapObject != null) { + const characterSet = (this._flags >> 8) & 0x0f; + switch (characterSet) { + case 0: + return hdf5.Reader.decode(globalHeapObject.data, 'ascii'); + case 1: + return hdf5.Reader.decode(globalHeapObject.data, 'utf-8'); + } + throw new hdf5.Error('Unsupported character encoding.'); + } + break; + } + default: + throw new hdf5.Error('Unsupported datatype class \'' + this._class + '\'.'); + } + return null; + } +}; + +hdf5.FillValue = class { + + constructor(reader, type) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#FillValueMessage + switch (type) { + case 0x0004: { + const size = reader.uint32(); + this.data = reader.bytes(size); + break; + } + case 
0x0005: + default: { + const version = reader.byte(); + switch (version) { + case 1: + case 2: { + reader.byte(); + reader.byte(); + const valueDefined = reader.byte(); + if (version === 1 || valueDefined === 1) { + const size = reader.uint32(); + this.data = reader.bytes(size); + } + break; + } + default: + throw new hdf5.Error('Unsupported fill value version \'' + version + '\'.'); + } + break; + } + } + } +}; + +hdf5.Link = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#FillValueMessage + const version = reader.byte(); + switch (version) { + case 1: { + const flags = reader.byte(); + this.type = (flags & 0x08) != 0 ? reader.byte() : 0; + if ((flags & 0x04) != 0) { + this.creationOrder = reader.uint32(); + } + const encoding = ((flags & 0x10) != 0 && reader.byte() == 1) ? 'utf-8' : 'ascii'; + this.name = reader.string(reader.uint(flags & 0x03), encoding); + switch (this.type) { + case 0: // hard link + this.objectHeaderAddress = reader.offset(); + break; + case 1: // soft link + break; + } + break; + } + default: + throw new hdf5.Error('Unsupported link message version \'' + version + '\'.'); + } + } +}; + +hdf5.DataLayout = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#LayoutMessage + const version = reader.byte(); + switch (version) { + case 1: + case 2: { + this.dimensionality = reader.byte(); + this.layoutClass = reader.byte(); + reader.skip(5); + switch (this.layoutClass) { + case 1: + this.address = reader.offset(); + this.dimensionSizes = []; + for (let i = 0; i < this.dimensionality - 1; i++) { + this.dimensionSizes.push(reader.int32()); + } + break; + case 2: // Chunked + this.address = reader.offset(); + this.dimensionSizes = []; + for (let i = 0; i < this.dimensionality - 1; i++) { + this.dimensionSizes.push(reader.int32()); + } + this.datasetElementSize = reader.int32(); + break; + default: + throw new hdf5.Error('Unsupported data layout class \'' + 
this.layoutClass + '\'.'); + } + break; + } + case 3: { + this.layoutClass = reader.byte(); + switch (this.layoutClass) { + case 1: // Contiguous + this.address = reader.offset(); + this.size = reader.length(); + break; + case 2: // Chunked + this.dimensionality = reader.byte(); + this.address = reader.offset(); + this.dimensionSizes = []; + for (let i = 0; i < this.dimensionality - 1; i++) { + this.dimensionSizes.push(reader.int32()); + } + this.datasetElementSize = reader.int32(); + break; + case 0: // Compact + default: + throw new hdf5.Error('Unsupported data layout class \'' + this.layoutClass + '\'.'); + } + break; + } + default: + throw new hdf5.Error('Unsupported data layout version \'' + version + '\'.'); + } + } +}; + +hdf5.GroupInfo = class { + + constructor(reader) { + const version = reader.byte(); + switch (version) { + case 0: { + const flags = reader.byte(); + if ((flags & 0x01) != 0) { + this.maxCompactLinks = reader.uint16(); + this.minDenseLinks = reader.uint16(); + } + if ((flags & 0x02) != 0) { + this.estimatedEntriesNumber = reader.uint16(); + this.estimatedLinkNameLengthEntires = reader.uint16(); + } + break; + } + default: + throw new hdf5.Error('Unsupported group info version \'' + version + '\'.'); + } + } +}; + +hdf5.FilterPipeline = class { + + constructor(reader) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#FilterMessage + const version = reader.byte(); + switch (version) { + case 1: { + this.filters = []; + const numberOfFilters = reader.byte(); + reader.skip(2); + reader.skip(4); + for (let i = 0; i < numberOfFilters; i++) { + this.filters.push(new hdf5.Filter(reader)); + reader.align(8); + } + break; + } + default: + throw new hdf5.Error('Unsupported filter pipeline message version \'' + version + '\'.'); + } + } +}; + +hdf5.Filter = class { + + constructor(reader) { + this.id = reader.int16(); + const nameLength = reader.int16(); + this.flags = reader.int16(); + const clientDataSize = reader.int16(); + this.name = 
reader.string(nameLength, 'ascii'); + this.clientData = reader.bytes(clientDataSize * 4); + } + + decode(data) { + switch (this.id) { + case 1: { // gzip + const rawData = data.subarray(2, data.length); // skip zlib header + return new zip.Inflater().inflateRaw(rawData); + } + default: + throw hdf5.Error("Unsupported filter '" + this.name + "'."); + } + } +}; + +hdf5.Attribute = class { + + constructor(reader) { + const version = reader.byte(); + switch (version) { + case 1: { + reader.skip(1); + const nameSize = reader.uint16(); + const datatypeSize = reader.uint16(); + const dataspaceSize = reader.uint16(); + this.name = reader.string(nameSize, 'utf-8'); + reader.align(8); + this._datatype = new hdf5.Datatype(reader.clone()); + reader.skip(datatypeSize); + reader.align(8); + this._dataspace = new hdf5.Dataspace(reader.clone()); + reader.skip(dataspaceSize); + reader.align(8); + this._data = this._dataspace.read(this._datatype, reader); + break; + } + case 3: { + reader.byte(); + const nameSize = reader.uint16(); + const datatypeSize = reader.uint16(); + const dataspaceSize = reader.uint16(); + const encoding = reader.byte() == 1 ? 
'utf-8' : 'ascii'; + this.name = reader.string(nameSize, encoding); + this._datatype = new hdf5.Datatype(reader.clone()); + reader.skip(datatypeSize); + this._dataspace = new hdf5.Dataspace(reader.clone()); + reader.skip(dataspaceSize); + this._data = this._dataspace.read(this._datatype, reader); + break; + } + default: + throw new hdf5.Error('Unsupported attribute message version \'' + version + '\'.'); + } + } + + decodeValue(globalHeap) { + if (this._data) { + return this._dataspace.decode(this._datatype, this._data, globalHeap); + } + return null; + } +}; + +hdf5.ObjectHeaderContinuation = class { + + constructor(reader) { + this.offset = reader.offset(); + this.length = reader.length(); + } +}; + +hdf5.SymbolTable = class { + + constructor(reader) { + this._treeAddress = reader.offset(); + this._heapAddress = reader.offset(); + } +}; + +hdf5.ObjectModificationTime = class { + + constructor(reader, type) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#ModificationTimeMessage + switch (type) { + case 0x000E: { + this.year = reader.uint32(); + this.month = reader.uint16(); + this.day = reader.uint16(); + this.hour = reader.uint16(); + this.minute = reader.uint16(); + this.second = reader.uint16(); + reader.skip(2); + break; + } + case 0x0012: { + const version = reader.byte(); + reader.skip(3); + switch (version) { + case 1: + this.timestamp = reader.uint32(); + break; + default: + throw new hdf5.Error('Unsupported object modification time message version \'' + version + '\'.'); + } + break; + } + } + } +}; + +hdf5.AttributeInfo = class { + + constructor(reader) { + const version = reader.byte(); + switch (version) { + case 0: { + const flags = reader.byte(); + if ((flags & 1) != 0) { + this.maxCreationIndex = reader.uint64(); + } + this.fractalHeapAddress = reader.offset(); + this.attributeNameTreeAddress = reader.offset(); + if ((flags & 2) != 0) { + this.attributeCreationOrderTreeAddress = reader.offset(); + } + break; + } + default: + throw new 
hdf5.Error('Unsupported attribute info message version \'' + version + '\'.'); + } + } +}; + +hdf5.Tree = class { + + constructor(reader, dimensionality) { + // https://support.hdfgroup.org/HDF5/doc/H5.format.html#V1Btrees + if (!reader.match('TREE')) { + throw new hdf5.Error("Not a valid 'TREE' block."); + } + this.type = reader.byte(); + this.level = reader.byte(); + const entriesUsed = reader.uint16(); + reader.offset(); // address of left sibling + reader.offset(); // address of right sibling + this.nodes = []; + switch (this.type) { + case 0: // Group nodes + for (let i = 0; i < entriesUsed; i++) { + reader.length(); + const childPointer = reader.offset(); + if (this.level == 0) { + this.nodes.push(new hdf5.SymbolTableNode(reader.at(childPointer))); + } + else { + const tree = new hdf5.Tree(reader.at(childPointer)); + this.nodes = this.nodes.concat(tree.nodes); + } + } + break; + case 1: // Raw data chunk nodes + for (let i = 0; i < entriesUsed; i++) { + const size = reader.int32(); + const filterMask = reader.int32(); + const fields = []; + for (let j = 0; j < dimensionality; j++) { + fields.push(reader.uint64()); + } + const childPointer = reader.offset(); + if (this.level == 0) { + const data = reader.at(childPointer).bytes(size); + this.nodes.push({ data: data, fields: fields, filterMask: filterMask }); + } + else { + const tree = new hdf5.Tree(reader.at(childPointer), dimensionality); + this.nodes = this.nodes.concat(tree.nodes); + } + } + break; + default: + throw new hdf5.Error('Unsupported B-Tree node type \'' + this.type + '\'.'); + } + } +}; + +hdf5.Heap = class { + + constructor(reader) { + this._reader = reader; + if (!reader.match('HEAP')) { + throw new hdf5.Error("Not a valid 'HEAP' block."); + } + const version = reader.byte(); + switch (version) { + case 0: { + reader.skip(3); + this._dataSize = reader.length(); + this._offsetToHeadOfFreeList = reader.length(); + this._dataAddress = reader.offset(); + break; + } + default: { + throw new 
hdf5.Error('Unsupported Local Heap version \'' + version + '\'.'); + } + } + } + + getString(offset) { + const reader = this._reader.at(this._dataAddress + offset); + return reader.string(-1, 'utf-8'); + } +}; + +hdf5.GlobalHeap = class { + + constructor(reader) { + this._reader = reader; + this._collections = new Map(); + } + + get(globalHeapID) { + const address = globalHeapID.address; + if (!this._collections.has(address)) { + this._collections.set(address, new hdf5.GlobalHeapCollection(this._reader.at(address))); + } + return this._collections.get(globalHeapID.address).getObject(globalHeapID.objectIndex); + } +}; + +hdf5.GlobalHeapCollection = class { + + constructor(reader) { + const startPosition = reader.position; + if (!reader.match('GCOL')) { + throw new hdf5.Error("Not a valid 'GCOL' block."); + } + const version = reader.byte(); + switch (version) { + case 1: { + reader.skip(3); + this._objects = new Map(); + const size = reader.length(); + const endPosition = startPosition + size; + while (reader.position < endPosition) { + const index = reader.uint16(); + if (index == 0) { + break; + } + this._objects.set(index, new hdf5.GlobalHeapObject(reader)); + reader.align(8); + } + break; + } + default: { + throw new hdf5.Error('Unsupported global heap collection version \'' + version + '\'.'); + } + } + } + + getObject(objectIndex) { + if (this._objects.has(objectIndex)) { + return this._objects.get(objectIndex); + } + return null; + } +}; + +hdf5.GlobalHeapObject = class { + + constructor(reader) { + reader.uint16(); + reader.skip(4); + this.data = reader.bytes(reader.length()); + } +}; + +hdf5.GlobalHeapID = class { + + constructor(reader) { + this.address = reader.offset(); + this.objectIndex = reader.uint32(); + } +}; + +hdf5.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'HDF5 Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.File = hdf5.File; +} \ No 
newline at end of file diff --git a/frontend/packages/core/public/netron/index.html b/frontend/packages/core/public/netron/index.html new file mode 100644 index 00000000..a3d08c4f --- /dev/null +++ b/frontend/packages/core/public/netron/index.html @@ -0,0 +1,73 @@ + + + + + + + Netron + + + + +
    + +
    + + + + + + + + + + + + + + + + + + diff --git a/frontend/packages/core/public/netron/index.js b/frontend/packages/core/public/netron/index.js new file mode 100644 index 00000000..11f0dfee --- /dev/null +++ b/frontend/packages/core/public/netron/index.js @@ -0,0 +1,468 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ +/* eslint "no-global-assign": ["error", {"exceptions": [ "TextDecoder", "TextEncoder", "URLSearchParams" ] } ] */ +/* global view */ + +var host = {}; + +host.BrowserHost = class { + constructor() { + window.eval = () => { + throw new Error('window.eval() not supported.'); + }; + this._document = window.document; + this._meta = {}; + for (const element of Array.from(this._document.getElementsByTagName('meta'))) { + if (element.content) { + this._meta[element.name] = this._meta[element.name] || []; + this._meta[element.name].push(element.content); + } + } + this._type = this._meta.type ? this._meta.type[0] : 'Browser'; + this._version = this._meta.version ? 
this._meta.version[0] : null; + } + + get document() { + return this._document; + } + + get version() { + return this._version; + } + + get type() { + return this._type; + } + + initialize(view) { + this._view = view; + return Promise.resolve(); + } + + start() { + window.addEventListener( + 'message', + event => { + const originalData = event.data; + if (originalData) { + const type = originalData.type; + const data = originalData.data; + switch (type) { + case 'change-files': + return this._changeFiles(data); + case 'zoom-in': + return this._view.zoomIn(); + case 'zoom-out': + return this._view.zoomOut(); + case 'zoom-reset': + return this._view.resetZoom(); + case 'toggle-attributes': + return this._view.toggleAttributes(data); + case 'toggle-initializers': + return this._view.toggleInitializers(data); + case 'toggle-names': + return this._view.toggleNames(data); + case 'export': + return this._view.export(`${document.title}.${data}`); + case 'search': + return this._view.find(data); + case 'select': + return this._view.select(data); + case 'show-model-properties': + return this._view.showModelProperties(); + case 'show-node-documentation': + return this._view.showNodeDocumentation(data); + } + } + }, + false + ); + + this.status('ready'); + } + + message(type, data) { + if (window.parent) { + window.parent.postMessage({type: type, data: data}); + } + } + + status(status) { + this.message('status', status); + } + + error(message, detail) { + alert((message == 'Error' ? 
'' : message + ' ') + detail); + } + + confirm(message, detail) { + return confirm(message + ' ' + detail); + } + + require(id) { + const url = this._url(id + '.js'); + window.__modules__ = window.__modules__ || {}; + if (window.__modules__[url]) { + return Promise.resolve(window.__exports__[url]); + } + return new Promise((resolve, reject) => { + window.module = {exports: {}}; + let script = document.createElement('script'); + script.setAttribute('id', id); + script.setAttribute('type', 'text/javascript'); + script.setAttribute('src', url); + script.onload = () => { + const exports = window.module.exports; + delete window.module; + window.__modules__[id] = exports; + resolve(exports); + }; + script.onerror = e => { + delete window.module; + reject(new Error("The script '" + e.target.src + "' failed to load.")); + }; + this.document.head.appendChild(script); + }); + } + + save(name, extension, defaultPath, callback) { + callback(defaultPath + '.' + extension); + } + + export(file, blob) { + let element = this.document.createElement('a'); + element.download = file; + element.href = URL.createObjectURL(blob); + this.document.body.appendChild(element); + element.click(); + this.document.body.removeChild(element); + } + + request(base, file, encoding) { + const url = base ? 
base + '/' + file : this._url(file); + return this._request(url, null, encoding); + } + + openURL(url) { + window.open(url, '_target'); + } + + _changeFiles(files) { + if (files && files.length) { + files = Array.from(files); + this._open( + files.find(file => this._view.accept(file.name)), + files + ); + } + } + + _request(url, headers, encoding, timeout) { + return new Promise((resolve, reject) => { + const request = new XMLHttpRequest(); + if (!encoding) { + request.responseType = 'arraybuffer'; + } + if (timeout) { + request.timeout = timeout; + } + const error = status => { + const err = new Error('The web request failed with status code ' + status + " at '" + url + "'."); + err.type = 'error'; + err.url = url; + return err; + }; + request.onload = () => { + if (request.status == 200) { + if (request.responseType == 'arraybuffer') { + resolve(new Uint8Array(request.response)); + } else { + resolve(request.responseText); + } + } else { + reject(error(request.status)); + } + }; + request.onerror = e => { + const err = error(request.status); + err.type = e.type; + reject(err); + }; + request.ontimeout = () => { + request.abort(); + const err = new Error("The web request timed out in '" + url + "'."); + err.type = 'timeout'; + err.url = url; + reject(err); + }; + request.open('GET', url, true); + if (headers) { + for (const name of Object.keys(headers)) { + request.setRequestHeader(name, headers[name]); + } + } + request.send(); + }); + } + + _url(file) { + let url = file; + if (window && window.location && window.location.href) { + let location = window.location.href.split('?').shift(); + if (location.endsWith('.html')) { + location = location.split('/').slice(0, -1).join('/'); + } + if (location.endsWith('/')) { + location = location.slice(0, -1); + } + url = location + '/' + file; + } + return url; + } + + _open(file, files) { + this.status('loading'); + const context = new BrowserFileContext(file, files); + context + .open() + .then(() => { + return 
this._view.open(context).then(model => { + if (this._view.actived) { + this.status('rendered'); + } + this.document.title = files[0].name; + return model; + }); + }) + .catch(error => { + this.error(error.name, error.message); + }); + } +}; + +if (typeof TextDecoder === 'undefined') { + TextDecoder = function TextDecoder(encoding) { + this._encoding = encoding; + }; + TextDecoder.prototype.decode = function decode(buffer) { + let result = ''; + const length = buffer.length; + let i = 0; + switch (this._encoding) { + case 'utf-8': + while (i < length) { + const c = buffer[i++]; + switch (c >> 4) { + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: { + result += String.fromCharCode(c); + break; + } + case 12: + case 13: { + const c2 = buffer[i++]; + result += String.fromCharCode(((c & 0x1f) << 6) | (c2 & 0x3f)); + break; + } + case 14: { + const c2 = buffer[i++]; + const c3 = buffer[i++]; + result += String.fromCharCode(((c & 0x0f) << 12) | ((c2 & 0x3f) << 6) | ((c3 & 0x3f) << 0)); + break; + } + } + } + break; + case 'ascii': + while (i < length) { + result += String.fromCharCode(buffer[i++]); + } + break; + } + return result; + }; +} + +if (typeof TextEncoder === 'undefined') { + // eslint-disable-next-line @typescript-eslint/no-empty-function + TextEncoder = function TextEncoder() {}; + TextEncoder.prototype.encode = function encode(str) { + 'use strict'; + const length = str.length; + let resPos = -1; + const resArr = typeof Uint8Array === 'undefined' ? 
new Array(length * 2) : new Uint8Array(length * 3); + for (let point = 0, nextcode = 0, i = 0; i !== length; ) { + point = str.charCodeAt(i); + i += 1; + if (point >= 0xd800 && point <= 0xdbff) { + if (i === length) { + resArr[(resPos += 1)] = 0xef; + resArr[(resPos += 1)] = 0xbf; + resArr[(resPos += 1)] = 0xbd; + break; + } + nextcode = str.charCodeAt(i); + if (nextcode >= 0xdc00 && nextcode <= 0xdfff) { + point = (point - 0xd800) * 0x400 + nextcode - 0xdc00 + 0x10000; + i += 1; + if (point > 0xffff) { + resArr[(resPos += 1)] = (0x1e << 3) | (point >>> 18); + resArr[(resPos += 1)] = (0x2 << 6) | ((point >>> 12) & 0x3f); + resArr[(resPos += 1)] = (0x2 << 6) | ((point >>> 6) & 0x3f); + resArr[(resPos += 1)] = (0x2 << 6) | (point & 0x3f); + continue; + } + } else { + resArr[(resPos += 1)] = 0xef; + resArr[(resPos += 1)] = 0xbf; + resArr[(resPos += 1)] = 0xbd; + continue; + } + } + if (point <= 0x007f) { + resArr[(resPos += 1)] = (0x0 << 7) | point; + } else if (point <= 0x07ff) { + resArr[(resPos += 1)] = (0x6 << 5) | (point >>> 6); + resArr[(resPos += 1)] = (0x2 << 6) | (point & 0x3f); + } else { + resArr[(resPos += 1)] = (0xe << 4) | (point >>> 12); + resArr[(resPos += 1)] = (0x2 << 6) | ((point >>> 6) & 0x3f); + resArr[(resPos += 1)] = (0x2 << 6) | (point & 0x3f); + } + } + if (typeof Uint8Array !== 'undefined') { + return new Uint8Array(resArr.buffer.slice(0, resPos + 1)); + } else { + return resArr.length === resPos + 1 ? 
resArr : resArr.slice(0, resPos + 1); + } + }; + TextEncoder.prototype.toString = function () { + return '[object TextEncoder]'; + }; + try { + Object.defineProperty(TextEncoder.prototype, 'encoding', { + get: function () { + if (Object.prototype.isPrototypeOf.call(TextEncoder.prototype, this)) { + return 'utf-8'; + } else { + throw TypeError('Illegal invocation'); + } + } + }); + } catch (e) { + TextEncoder.prototype.encoding = 'utf-8'; + } + if (typeof Symbol !== 'undefined') { + TextEncoder.prototype[Symbol.toStringTag] = 'TextEncoder'; + } +} + +if (typeof URLSearchParams === 'undefined') { + URLSearchParams = function URLSearchParams(search) { + const decode = str => { + return str.replace(/[ +]/g, '%20').replace(/(%[a-f0-9]{2})+/gi, match => { + return decodeURIComponent(match); + }); + }; + this._dict = {}; + if (typeof search === 'string') { + search = search.indexOf('?') === 0 ? search.substring(1) : search; + const properties = search.split('&'); + for (const property of properties) { + const index = property.indexOf('='); + const name = index > -1 ? decode(property.substring(0, index)) : decode(property); + const value = index > -1 ? decode(property.substring(index + 1)) : ''; + if (!Object.prototype.hasOwnProperty.call(this._dict, name)) { + this._dict[name] = []; + } + this._dict[name].push(value); + } + } + }; + URLSearchParams.prototype.get = function (name) { + return Object.prototype.hasOwnProperty.call(this._dict, name) ? 
this._dict[name][0] : null; + }; +} + +if (!HTMLCanvasElement.prototype.toBlob) { + HTMLCanvasElement.prototype.toBlob = function (callback, type, quality) { + setTimeout(() => { + const data = atob(this.toDataURL(type, quality).split(',')[1]); + const length = data.length; + const buffer = new Uint8Array(length); + for (let i = 0; i < length; i++) { + buffer[i] = data.charCodeAt(i); + } + callback(new Blob([buffer], {type: type || 'image/png'})); + }, 0); + }; +} + +class BrowserFileContext { + constructor(file, blobs) { + this._file = file; + this._blobs = {}; + for (const blob of blobs) { + this._blobs[blob.name] = blob; + } + } + + get identifier() { + return this._file.name; + } + + get buffer() { + return this._buffer; + } + + open() { + return this.request(this._file.name, null).then(data => { + this._buffer = data; + }); + } + + request(file, encoding) { + const blob = this._blobs[file]; + if (!blob) { + return Promise.reject(new Error("File not found '" + file + "'.")); + } + return new Promise((resolve, reject) => { + let reader = new FileReader(); + reader.onload = e => { + resolve(encoding ? 
e.target.result : new Uint8Array(e.target.result)); + }; + reader.onerror = e => { + e = e || window.event; + let message = ''; + switch (e.target.error.code) { + case e.target.error.NOT_FOUND_ERR: + message = "File not found '" + file + "'."; + break; + case e.target.error.NOT_READABLE_ERR: + message = "File not readable '" + file + "'."; + break; + case e.target.error.SECURITY_ERR: + message = "File access denied '" + file + "'."; + break; + default: + message = "File read '" + e.target.error.code.toString() + "' error '" + file + "'."; + break; + } + reject(new Error(message)); + }; + if (encoding === 'utf-8') { + reader.readAsText(blob, encoding); + } else { + reader.readAsArrayBuffer(blob); + } + }); + } +} + +window.__view__ = new view.View(new host.BrowserHost()); diff --git a/frontend/packages/core/public/netron/keras-metadata.json b/frontend/packages/core/public/netron/keras-metadata.json new file mode 100644 index 00000000..da8c6552 --- /dev/null +++ b/frontend/packages/core/public/netron/keras-metadata.json @@ -0,0 +1,3794 @@ +[ + { + "name": "Bidirectional", + "schema": { + "attributes": [ + { + "default": "concat", + "description": "Mode by which outputs of the\n forward and backward RNNs will be combined.\n One of {'sum', 'mul', 'concat', 'ave', None}.\n If None, the outputs will not be combined,\n they will be returned as a list.", + "name": "merge_mode" + }, + { + "description": "`Recurrent` instance.", + "name": "layer" + }, + { + "description": "Initial weights to load in the Bidirectional model\n", + "name": "weights" + } + ], + "category": "Wrapper", + "description": "Bidirectional wrapper for RNNs.\n", + "examples": [ + { + "code": "model = Sequential()\nmodel.add(Bidirectional(LSTM(10, return_sequences=True),\n input_shape=(5, 10)))\nmodel.add(Bidirectional(LSTM(10)))\nmodel.add(Dense(5))\nmodel.add(Activation('softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')" + } + ], + "inputs": [ + { + "name": "input" + } + ], 
+ "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "TimeDistributed", + "schema": { + "attributes": [ + { + "description": "a layer instance.\n", + "name": "layer" + } + ], + "category": "Wrapper", + "description": "This wrapper applies a layer to every temporal slice of an input.\n\nThe input should be at least 3D, and the dimension of index one\nwill be considered to be the temporal dimension.\n\nConsider a batch of 32 samples,\nwhere each sample is a sequence of 10 vectors of 16 dimensions.\nThe batch input shape of the layer is then `(32, 10, 16)`,\nand the `input_shape`, not including the samples dimension, is `(10, 16)`.\n\nYou can then use `TimeDistributed` to apply a `Dense` layer\nto each of the 10 timesteps, independently:\n\n```python\n# as the first layer in a model\nmodel = Sequential()\nmodel.add(TimeDistributed(Dense(8), input_shape=(10, 16)))\n# now model.output_shape == (None, 10, 8)\n```\n\nThe output will then have shape `(32, 10, 8)`.\n\nIn subsequent layers, there is no need for the `input_shape`:\n\n```python\nmodel.add(TimeDistributed(Dense(32)))\n# now model.output_shape == (None, 10, 32)\n```\n\nThe output will then have shape `(32, 10, 32)`.\n\n`TimeDistributed` can be used with arbitrary layers, not just `Dense`,\nfor instance with a `Conv2D` layer:\n\n```python\nmodel = Sequential()\nmodel.add(TimeDistributed(Conv2D(64, (3, 3)),\n input_shape=(10, 299, 299, 3)))\n```\n", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Activation", + "schema": { + "attributes": [ + { + "description": "name of activation function to use\n (see: [activations](https://keras.io/activations)),\n or alternatively, a Theano or TensorFlow operation.\n", + "name": "activation" + } + ], + "category": "Activation", + "description": "Applies an activation function to an output.\n", + "inputs": [ + { + "description": 
"\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\nSame shape as input.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "ReLU", + "schema": { + "attributes": [ + { + "description": "float >= 0. Maximum activation value.", + "name": "max_value" + }, + { + "description": "float >= 0. Negative slope coefficient.", + "name": "negative_slope" + }, + { + "description": "float. Threshold value for thresholded activation.\n", + "name": "threshold" + } + ], + "category": "Activation", + "description": "Rectified Linear Unit activation function.\n\nWith default values, it returns element-wise `max(x, 0)`.\n\nOtherwise, it follows:\n`f(x) = max_value` for `x >= max_value`,\n`f(x) = x` for `threshold <= x < max_value`,\n`f(x) = negative_slope * (x - threshold)` otherwise.\n", + "inputs": [ + { + "description": "\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\nSame shape as the input.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "LeakyReLU", + "schema": { + "attributes": [ + { + "description": "float >= 0. Negative slope coefficient.\n", + "name": "alpha" + } + ], + "category": "Activation", + "description": "Leaky version of a Rectified Linear Unit.\n\nIt allows a small gradient when the unit is not active:\n`f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`.\n", + "inputs": [ + { + "description": "\nArbitrary. 
Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\nSame shape as the input.\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Rectifier Nonlinearities Improve Neural Network Acoustic Models]( https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf)" + } + ] + } + }, + { + "name": "PReLU", + "schema": { + "attributes": [ + { + "description": "initializer function for the weights.", + "name": "alpha_initializer" + }, + { + "description": "regularizer for the weights.", + "name": "alpha_regularizer", + "visible": false + }, + { + "description": "constraint for the weights.", + "name": "alpha_constraint" + }, + { + "description": "the axes along which to share learnable\n parameters for the activation function.\n For example, if the incoming feature maps\n are from a 2D convolution\n with output shape `(batch, height, width, channels)`,\n and you wish to share parameters across space\n so that each filter only has one set of parameters,\n set `shared_axes=[1, 2]`.\n", + "name": "shared_axes" + } + ], + "category": "Activation", + "description": "Parametric Rectified Linear Unit.\n\nIt follows:\n`f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`,\nwhere `alpha` is a learned array with the same shape as x.\n", + "inputs": [ + { + "description": "\nArbitrary. 
Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + }, + { + "name": "params" + } + ], + "outputs": [ + { + "description": "\nSame shape as the input.\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)" + } + ] + } + }, + { + "name": "ELU", + "schema": { + "attributes": [ + { + "description": "scale for the negative factor.\n", + "name": "alpha" + } + ], + "category": "Activation", + "description": "Exponential Linear Unit.\n\nIt follows:\n`f(x) = alpha * (exp(x) - 1.) for x < 0`,\n`f(x) = x for x >= 0`.\n", + "inputs": [ + { + "description": "\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\nSame shape as the input.\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)" + } + ] + } + }, + { + "name": "ThresholdedReLU", + "schema": { + "attributes": [ + { + "description": "float >= 0. Threshold location of activation.\n", + "name": "theta" + } + ], + "category": "Activation", + "description": "Thresholded Rectified Linear Unit.\n\nIt follows:\n`f(x) = x for x > theta`,\n`f(x) = 0 otherwise`.\n", + "inputs": [ + { + "description": "\nArbitrary. 
Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\nSame shape as the input.\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Zero-Bias Autoencoders and the Benefits of Co-Adapting Features]( https://arxiv.org/abs/1402.3337)" + } + ] + } + }, + { + "name": "MaxPooling1D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, steps, features)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n", + "name": "data_format" + }, + { + "default": "valid", + "description": "One of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "default": [ + 2, + 2 + ], + "description": "Integer, size of the max pooling windows.", + "name": "pool_size" + }, + { + "default": [ + 2, + 2 + ], + "description": "Integer, or None. Factor by which to downscale.\n E.g. 
2 will halve the input.\n If None, it will default to `pool_size`.", + "name": "strides" + } + ], + "category": "Pool", + "description": "Max pooling operation for temporal data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, downsampled_steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, downsampled_steps)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "MaxPooling2D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n", + "name": "data_format" + }, + { + "default": "valid", + "description": "One of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "default": [ + 2, + 2 + ], + "description": "integer or tuple of 2 integers,\n factors by which to downscale (vertical, horizontal).\n (2, 2) will halve the input in both spatial dimension.\n If only one integer is specified, the same window length\n will be used for both dimensions.", + "name": "pool_size" + }, + { + "default": [ + 2, + 2 + ], + "description": "Integer, tuple of 2 integers, or None.\n Strides values.\n If None, it will default to `pool_size`.", + "name": 
"strides" + } + ], + "category": "Pool", + "description": "Max pooling operation for spatial data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, rows, cols, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, pooled_rows, pooled_cols, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, pooled_rows, pooled_cols)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "MaxPooling3D", + "schema": { + "attributes": [ + { + "description": "Integer or tuple of 3 integers,\n factors by which to downscale (dim1, dim2, dim3).\n (2, 2, 2) will halve the size of the 3D input in each dimension.", + "name": "pool_size" + }, + { + "description": "Integer, tuple of 3 integers, or None. 
Strides values.\n If None, it will default to `pool_size`.", + "name": "strides" + }, + { + "description": "One of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n", + "name": "data_format" + } + ], + "category": "Pool", + "description": "Max pooling operation for 3D data (spatial or spatio-temporal).\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n- If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n- If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "UpSampling1D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "name": "data_format" + }, + { + "description": "integer. 
Upsampling factor.\n", + "name": "size" + } + ], + "category": "Layer", + "description": "Upsampling layer for 1D inputs.\n\nRepeats each temporal step `size` times along the time axis.\n", + "inputs": [ + { + "description": "\n3D tensor with shape: `(batch, steps, features)`.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n3D tensor with shape: `(batch, upsampled_steps, features)`.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "UpSampling2D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".", + "name": "data_format" + }, + { + "description": "int, or tuple of 2 integers.\n The upsampling factors for rows and columns.", + "name": "size" + }, + { + "description": "A string, one of `nearest` or `bilinear`.\n Note that CNTK does not support yet the `bilinear` upscaling\n and that with Theano, only `size=(2, 2)` is possible.\n", + "name": "interpolation" + } + ], + "category": "Layer", + "description": "Upsampling layer for 2D inputs.\n\nRepeats the rows and columns of the data\nby size[0] and size[1] respectively.\n", + "inputs": [ + { + "description": "\n4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, rows, cols, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, rows, cols)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, upsampled_rows, upsampled_cols, 
channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, upsampled_rows, upsampled_cols)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "UpSampling3D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n", + "name": "data_format" + }, + { + "description": "int, or tuple of 3 integers.\n The upsampling factors for dim1, dim2 and dim3.", + "name": "size" + } + ], + "category": "Layer", + "description": "Upsampling layer for 3D inputs.\n\nRepeats the 1st, 2nd and 3rd dimensions\nof the data by size[0], size[1] and size[2] respectively.\n", + "inputs": [ + { + "description": "\n5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, dim1, dim2, dim3, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, dim1, dim2, dim3)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "ZeroPadding1D", + "schema": { + "attributes": [ + { + "description": "int, or tuple of int (length 2), or dictionary.\n - If int:\n\n How many zeros to add at the beginning and end of\n the padding dimension 
(axis 1).\n\n - If tuple of int (length 2):\n\n How many zeros to add at the beginning and at the end of\n the padding dimension (`(left_pad, right_pad)`).\n", + "name": "padding" + } + ], + "category": "Tensor", + "description": "Zero-padding layer for 1D input (e.g. temporal sequence).\n", + "inputs": [ + { + "description": "\n3D tensor with shape `(batch, axis_to_pad, features)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n3D tensor with shape `(batch, padded_axis, features)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "ZeroPadding2D", + "schema": { + "attributes": [ + { + "description": "int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric padding values for height and width:\n `(symmetric_height_pad, symmetric_width_pad)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_pad, bottom_pad), (left_pad, right_pad))`", + "name": "padding" + }, + { + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n", + "name": "data_format" + } + ], + "category": "Tensor", + "description": "Zero-padding layer for 2D input (e.g. 
picture).\n\nThis layer can add rows and columns of zeros\nat the top, bottom, left and right side of an image tensor.\n", + "inputs": [ + { + "description": "\n4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, rows, cols, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, rows, cols)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, padded_rows, padded_cols, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, padded_rows, padded_cols)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "ZeroPadding3D", + "schema": { + "attributes": [ + { + "description": "int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric padding\n is applied to height and width.\n - If tuple of 3 ints:\n interpreted as three different\n symmetric padding values for depth, height, and width:\n `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.\n - If tuple of 3 tuples of 2 ints:\n interpreted as\n `((left_dim1_pad, right_dim1_pad),\n (left_dim2_pad, right_dim2_pad),\n (left_dim3_pad, right_dim3_pad))`", + "name": "padding" + }, + { + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n", + "name": "data_format" + } + ], + "category": "Tensor", + "description": "Zero-padding layer for 3D data (spatial or spatio-temporal).\n", + "inputs": [ + { + "description": "\n5D 
tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad,\n depth)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, depth,\n first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, first_padded_axis, second_padded_axis, third_axis_to_pad,\n depth)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, depth,\n first_padded_axis, second_padded_axis, third_axis_to_pad)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "GlobalMaxPooling1D", + "schema": { + "attributes": [ + { + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, steps, features)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n", + "name": "data_format" + } + ], + "category": "Pool", + "description": "Global max pooling operation for temporal data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n2D tensor with shape:\n`(batch_size, features)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "GlobalMaxPooling2D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with 
shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n", + "name": "data_format" + } + ], + "category": "Pool", + "description": "Global max pooling operation for spatial data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, rows, cols, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n2D tensor with shape:\n`(batch_size, channels)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "GlobalAveragePooling1D", + "schema": { + "attributes": [ + { + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, steps, features)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n", + "name": "data_format" + } + ], + "category": "Pool", + "description": "Global average pooling operation for temporal data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n2D tensor with shape:\n`(batch_size, features)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "GlobalAveragePooling2D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with 
shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n", + "name": "data_format" + } + ], + "category": "Pool", + "description": "Global average pooling operation for spatial data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, rows, cols, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n2D tensor with shape:\n`(batch_size, channels)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "AveragePooling1D", + "schema": { + "attributes": [ + { + "description": "Integer, size of the average pooling windows.", + "name": "pool_size" + }, + { + "description": "Integer, or None. Factor by which to downscale.\n E.g. 
2 will halve the input.\n If None, it will default to `pool_size`.", + "name": "strides" + }, + { + "description": "One of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, steps, features)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, features, steps)`.\n", + "name": "data_format" + } + ], + "category": "Pool", + "description": "Average pooling for temporal data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, downsampled_steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, downsampled_steps)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "AveragePooling2D", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n", + "name": "data_format" + }, + { + "description": "Integer or tuple of 2 integers,\n factors by which to downscale (vertical, horizontal).\n (2, 2) will halve the input in both 
spatial dimension.\n If only one integer is specified, the same window length\n will be used for both dimensions.", + "name": "pool_size" + }, + { + "description": "Integer, tuple of 2 integers, or None.\n Strides values.\n If None, it will default to `pool_size`.", + "name": "strides" + }, + { + "description": "One of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + } + ], + "category": "Pool", + "description": "Average pooling operation for spatial data.\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, rows, cols, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, rows, cols)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 4D tensor with shape:\n `(batch_size, pooled_rows, pooled_cols, channels)`\n- If `data_format='channels_first'`:\n 4D tensor with shape:\n `(batch_size, channels, pooled_rows, pooled_cols)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "AveragePooling3D", + "schema": { + "attributes": [ + { + "description": "Integer or tuple of 3 integers,\n factors by which to downscale (dim1, dim2, dim3).\n (2, 2, 2) will halve the size of the 3D input in each dimension.", + "name": "pool_size" + }, + { + "description": "Integer, tuple of 3 integers, or None. 
Strides values.\n If None, it will default to `pool_size`.", + "name": "strides" + }, + { + "description": "One of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n", + "name": "data_format" + } + ], + "description": "Average pooling operation for 3D data (spatial or spatio-temporal).\n", + "inputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n- If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- If `data_format='channels_last'`:\n 5D tensor with shape:\n `(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`\n- If `data_format='channels_first'`:\n 5D tensor with shape:\n `(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "BatchNormalization", + "schema": { + "attributes": [ + { + "default": -1, + "description": "Integer, the axis that should be normalized\n (typically the features axis).\n For instance, after a `Conv2D` layer with\n `data_format=\"channels_first\"`,\n set `axis=1` in `BatchNormalization`.", + "name": "axis" + }, + { + "default": 0.001, + "description": "Small float added to variance to 
avoid dividing by zero.", + "name": "epsilon" + }, + { + "default": 0.99, + "description": "Momentum for the moving mean and the moving variance.", + "name": "momentum" + }, + { + "default": true, + "description": "If True, multiply by `gamma`.\n If False, `gamma` is not used.\n When the next layer is linear (also e.g. `nn.relu`),\n this can be disabled since the scaling\n will be done by the next layer.", + "name": "scale", + "type": "boolean" + }, + { + "default": true, + "description": "If True, add offset of `beta` to normalized tensor.\n If False, `beta` is ignored.", + "name": "center" + }, + { + "default": { + "class_name": "Ones", + "config": {} + }, + "description": "Initializer for the gamma weight.", + "name": "gamma_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the moving mean.", + "name": "moving_mean_initializer", + "visible": false + }, + { + "default": { + "class_name": "Ones", + "config": {} + }, + "description": "Initializer for the moving variance.", + "name": "moving_variance_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the beta weight.", + "name": "beta_initializer", + "visible": false + }, + { + "description": "Optional regularizer for the beta weight.", + "name": "beta_regularizer", + "visible": false + }, + { + "description": "Optional regularizer for the gamma weight.", + "name": "gamma_regularizer", + "visible": false + }, + { + "description": "Optional constraint for the beta weight.", + "name": "beta_constraint" + }, + { + "description": "Optional constraint for the gamma weight.\n", + "name": "gamma_constraint" + } + ], + "category": "Normalization", + "description": "Batch normalization layer (Ioffe and Szegedy, 2014).\n\nNormalize the activations of the previous layer at each batch,\ni.e. 
applies a transformation that maintains the mean activation\nclose to 0 and the activation standard deviation close to 1.\n", + "inputs": [ + { + "description": "\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + }, + { + "name": "gamma" + }, + { + "name": "beta" + }, + { + "name": "moving_mean" + }, + { + "name": "moving_variance" + } + ], + "outputs": [ + { + "description": "\nSame shape as input.\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)" + } + ] + } + }, + { + "name": "BatchNorm", + "schema": { + "attributes": [ + { + "default": -1, + "name": "axis" + }, + { + "default": 0.001, + "name": "epsilon" + }, + { + "default": 0.99, + "name": "momentum" + }, + { + "default": true, + "name": "scale" + }, + { + "default": true, + "name": "center" + }, + { + "default": { + "class_name": "Ones", + "config": {} + }, + "name": "gamma_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "name": "moving_mean_initializer", + "visible": false + }, + { + "default": { + "class_name": "Ones", + "config": {} + }, + "name": "moving_variance_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "name": "beta_initializer", + "visible": false + }, + { + "name": "beta_regularizer", + "visible": false + }, + { + "name": "gamma_regularizer", + "visible": false + }, + { + "name": "beta_constraint" + }, + { + "name": "gamma_constraint" + } + ], + "category": "Normalization", + "inputs": [ + { + "name": "input" + }, + { + "name": "gamma" + }, + { + "name": "beta" + }, + { + "name": "running_mean" + }, + { + "name": "running_std" + } + ], + "outputs": [ + { + "name": "output" + 
} + ] + } + }, + { + "name": "ActivityRegularization", + "schema": { + "attributes": [ + { + "description": "L1 regularization factor (positive float).", + "name": "l1" + }, + { + "description": "L2 regularization factor (positive float).\n", + "name": "l2" + } + ], + "description": "Layer that applies an update to the cost function based input activity.\n", + "inputs": [ + { + "description": "\nArbitrary. Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\nSame shape as input.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Masking", + "schema": { + "attributes": [ + { + "description": "Either None or mask value to skip\n", + "name": "mask_value" + } + ], + "description": "Masks a sequence by using a mask value to skip timesteps.\n\nIf all features for a given sample timestep are equal to `mask_value`,\nthen the sample timestep will be masked (skipped) in all downstream layers\n(as long as they support masking).\n\nIf any downstream layer does not support masking yet receives such\nan input mask, an exception will be raised.\n", + "examples": [ + { + "code": "model = Sequential()\nmodel.add(Masking(mask_value=0., input_shape=(timesteps, features)))\nmodel.add(LSTM(32))", + "summary": "Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\nto be fed to an LSTM layer.\nYou want to mask sample #0 at timestep #3, and sample #2 at timestep #5,\nbecause you lack features for these sample timesteps. 
You can do:\n- set `x[0, 3, :] = 0.` and `x[2, 5, :] = 0.`\n- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Dense", + "schema": { + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "default": "linear", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "type": "boolean" + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see 
[constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + } + ], + "category": "Layer", + "description": "Just your regular densely-connected NN layer.\n\n`Dense` implements the operation:\n`output = activation(dot(input, kernel) + bias)`\nwhere `activation` is the element-wise activation function\npassed as the `activation` argument, `kernel` is a weights matrix\ncreated by the layer, and `bias` is a bias vector created by the layer\n(only applicable if `use_bias` is `True`).\n\nNote: if the input to the layer has a rank greater than 2, then\nit is flattened prior to the initial dot product with `kernel`.\n", + "examples": [ + { + "code": "# as first layer in a sequential model:\nmodel = Sequential()\nmodel.add(Dense(32, input_shape=(16,)))\n# now the model will take as input arrays of shape (*, 16)\n# and output arrays of shape (*, 32)\n\n# after the first layer, you don't need to specify\n# the size of the input anymore:\nmodel.add(Dense(32))" + } + ], + "inputs": [ + { + "description": "\nnD tensor with shape: `(batch_size, ..., input_dim)`.\nThe most common situation would be\na 2D input with shape `(batch_size, input_dim)`.\n", + "name": "input", + "type": "T" + }, + { + "name": "kernel", + "type": "T" + }, + { + "name": "bias", + "type": "T" + } + ], + "outputs": [ + { + "description": "\nnD tensor with shape: `(batch_size, ..., units)`.\nFor instance, for a 2D input with shape `(batch_size, input_dim)`,\nthe output would have shape `(batch_size, units)`.\n", + "name": "output", + "type": "T" + } + ], + "package": "keras.layers" + } + }, + { + "name": "LocallyConnected1D", + "schema": { + "attributes": [ + { + "description": "Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.", + "name": "kernel_size" + }, + { + "description": "An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any `strides!=1` is incompatible with specifying\n any `dilation_rate!=1`.", + "name": "strides" + }, + { + "description": "Currently only supports `\"valid\"` (case-insensitive).\n `\"same\"` may be supported in the future.", + "name": "padding" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias" + }, + { + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the kernel matrix\n (see 
[constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + }, + { + "default": "channels_last", + "description": "String, one of `channels_first`, `channels_last`.", + "name": "data_format" + } + ], + "category": "Layer", + "description": "Locally-connected layer for 1D inputs.\n\nThe `LocallyConnected1D` layer works similarly to\nthe `Conv1D` layer, except that weights are unshared,\nthat is, a different set of filters is applied at each different patch\nof the input.\n", + "examples": [ + { + "code": "# apply a unshared weight convolution 1d of length 3 to a sequence with\n# 10 timesteps, with 64 output filters\nmodel = Sequential()\nmodel.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))\n# now model.output_shape == (None, 8, 64)\n# add a new conv1d on top\nmodel.add(LocallyConnected1D(32, 3))\n# now model.output_shape == (None, 6, 32)" + } + ], + "inputs": [ + { + "description": "\n3D tensor with shape: `(batch_size, steps, input_dim)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n3D tensor with shape: `(batch_size, new_steps, filters)`\n`steps` value might have changed due to padding or strides.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "LocallyConnected2D", + "schema": { + "attributes": [ + { + "description": "Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of 2 integers, specifying the\n width and height of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.", + "name": "kernel_size" + }, + { + "description": "An integer or tuple/list of 2 integers,\n specifying the strides of the convolution along the width and height.\n Can be a single integer to specify the same value for\n all spatial dimensions.", + "name": "strides" + }, + { + "description": "Currently only support `\"valid\"` (case-insensitive).\n `\"same\"` will be supported in future.", + "name": "padding" + }, + { + "default": "channels_last", + "description": "A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".", + "name": "data_format" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer" + }, + { + "description": "Constraint function applied to the kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + } + ], + "category": "Layer", + "description": "Locally-connected layer for 2D inputs.\n\nThe `LocallyConnected2D` layer works similarly\nto the `Conv2D` layer, except that weights are unshared,\nthat is, a different set of filters is applied at each\ndifferent patch of the input.\n", + "examples": [ + { + "code": "# apply a 3x3 unshared weights convolution with 64 output filters\n# on a 32x32 image with `data_format=\"channels_last\"`:\nmodel = Sequential()\nmodel.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))\n# now model.output_shape == (None, 30, 30, 64)\n# 
notice that this layer will consume (30*30)*(3*3*3*64)\n# + (30*30)*64 parameters\n\n# add a 3x3 unshared weights convolution on top, with 32 output filters:\nmodel.add(LocallyConnected2D(32, (3, 3)))\n# now model.output_shape == (None, 28, 28, 32)" + } + ], + "inputs": [ + { + "description": "\n4D tensor with shape:\n`(samples, channels, rows, cols)` if `data_format='channels_first'`\nor 4D tensor with shape:\n`(samples, rows, cols, channels)` if `data_format='channels_last'`.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n`(samples, filters, new_rows, new_cols)` if data_format='channels_first'\nor 4D tensor with shape:\n`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.\n`rows` and `cols` values might have changed due to padding.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "LSTM", + "schema": { + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "default": "tanh", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "hard_sigmoid", + "description": "Activation function to use\n for the recurrent step\n (see [activations](https://keras.io/activations)).\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "recurrent_activation" + }, + { + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "default": true, + "description": "Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al. 
(2015)](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint", + "visible": false + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint", + "visible": false + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint", + "visible": false + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.", + "name": "dropout" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.", + "name": "recurrent_dropout" + }, + { + "default": 1, + "description": "Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot 
products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.", + "name": "implementation" + }, + { + "default": false, + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean. Whether to return the last state\n in addition to the output. The returned elements of the\n states list are the hidden state and the cell state, respectively.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n", + "name": "unroll" + } + ], + "category": "Layer", + "description": "Long Short-Term Memory layer - Hochreiter 1997.\n", + "inputs": [ + { + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "recurrent_kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf)" + }, + { + "description": "[Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)" + }, + { + "description": "[Supervised sequence 
labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)" + }, + { + "description": "[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/abs/1512.05287)" + } + ] + } + }, + { + "name": "GRU", + "schema": { + "attributes": [ + { + "default": "tanh", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "hard_sigmoid", + "description": "Activation function to use\n for the recurrent step\n (see [activations](https://keras.io/activations)).\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Orthogonal", + "config": { + "gain": 1, + "seed": null + } + }, + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + 
{ + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.", + "name": "dropout" + }, + { + "default": 1, + "description": "Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.", + "name": "implementation" + }, + { + "default": false, + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean. Whether to return the last state\n in addition to the output.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.", + "name": "unroll" + }, + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear 
transformation of the recurrent state.", + "name": "recurrent_dropout" + }, + { + "description": "hard sigmoid (`hard_sigmoid`).\nIf you pass `None`, no activation is applied\n(ie. \"linear\" activation: `a(x) = x`).", + "name": "Default" + }, + { + "description": "GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\" (default),\n True = \"after\" (CuDNN compatible).\n", + "name": "reset_after" + } + ], + "category": "Layer", + "description": "Gated Recurrent Unit - Cho et al. 2014.\n\nThere are two variants. The default one is based on 1406.1078v3 and\nhas reset gate applied to hidden state before matrix multiplication. The\nother one is based on original 1406.1078v1 and has the order reversed.\n\nThe second variant is compatible with CuDNNGRU (GPU-only) and allows\ninference on CPU. Thus it has separate biases for `kernel` and\n`recurrent_kernel`. Use `'reset_after'=True` and\n`recurrent_activation='sigmoid'`.\n", + "inputs": [ + { + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "recurrent_kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation](https://arxiv.org/abs/1406.1078)" + }, + { + "description": "[On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)" + }, + { + "description": "[Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](https://arxiv.org/abs/1412.3555v1)" + }, + { + "description": "[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/abs/1512.05287)" + } + ] + } + }, + { + "name": "ConvLSTM2D", + "schema": { + "attributes": [ + { + "description": "Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.", + "name": "kernel_size" + }, + { + "description": "An integer or tuple/list of n integers,\n specifying the strides of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.", + "name": "strides" + }, + { + "description": "One of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, time, ..., channels)`\n while `\"channels_first\"` corresponds to\n inputs with shape `(batch, time, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.", + "name": "data_format" + }, + { + "description": "An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.", + "name": "dilation_rate" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n tanh is applied by default.", + "name": "activation" + }, + { + "description": "Activation function to use\n for the recurrent step\n (see [activations](https://keras.io/activations)).", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see 
[initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Use in combination with `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al. (2015)](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint", + "visible": false + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": 
"recurrent_constraint", + "visible": false + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint", + "visible": false + }, + { + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "description": "Boolean (default False).\n If True, process the input sequence backwards.", + "name": "go_backwards" + }, + { + "description": "Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.", + "name": "dropout" + }, + { + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n", + "name": "recurrent_dropout" + } + ], + "description": "Convolutional LSTM.\n\nIt is similar to an LSTM layer, but the input transformations\nand recurrent transformations are both convolutional.\n", + "inputs": [ + { + "description": "\n- if data_format='channels_first'\n 5D tensor with shape:\n `(samples, time, channels, rows, cols)`\n- if data_format='channels_last'\n 5D tensor with shape:\n `(samples, time, rows, cols, channels)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- if `return_sequences`\n - if data_format='channels_first'\n 5D tensor with shape:\n `(samples, time, filters, output_row, output_col)`\n - if data_format='channels_last'\n 5D tensor with shape:\n `(samples, time, output_row, output_col, filters)`\n- else\n - if data_format='channels_first'\n 4D tensor with shape:\n `(samples, filters, output_row, output_col)`\n - if data_format='channels_last'\n 4D tensor with shape:\n `(samples, output_row, output_col, 
filters)`\n\n where o_row and o_col depend on the shape of the filter and\n the padding\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1) The current implementation does not include the feedback loop on the cells output" + } + ] + } + }, + { + "name": "CuDNNGRU", + "schema": { + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + 
"visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "description": "Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "description": "Boolean. Whether to return the last state\n in addition to the output.", + "name": "return_state" + }, + { + "description": "Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n", + "name": "stateful" + } + ], + "description": "Fast GRU implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).\n\nCan only be run on GPU, with the TensorFlow backend.\n", + "package": "keras.layers" + } + }, + { + "name": "CuDNNLSTM", + "schema": { + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer" + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer" + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer" + }, + { + 
"description": "Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al. (2015)](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer" + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer" + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer" + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer" + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "description": "Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "description": "Boolean. Whether to return the last state\n in addition to the output.", + "name": "return_state" + }, + { + "description": "Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n", + "name": "stateful" + } + ], + "description": "Fast LSTM implementation with [CuDNN](https://developer.nvidia.com/cudnn).\n\nCan only be run on GPU, with the TensorFlow backend.\n", + "package": "keras.layers" + } + }, + { + "name": "SimpleRNN", + "schema": { + "attributes": [ + { + "default": false, + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean. Whether to return the last state\n in addition to the output.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n", + "name": "unroll" + }, + { + "default": "tanh", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Orthogonal", + "config": { + "gain": 1, + "seed": null + } + }, + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.", + "name": "dropout" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.", + "name": "recurrent_dropout" + }, + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", 
+ "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "description": "hyperbolic tangent (`tanh`).\nIf you pass `None`, no activation is applied\n(ie. \"linear\" activation: `a(x) = x`).", + "name": "Default" + } + ], + "category": "Layer", + "description": "Fully-connected RNN where the output is to be fed back to input.\n", + "inputs": [ + { + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "recurrent_kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "RNN", + "schema": { + "attributes": [ + { + "default": false, + "description": "Boolean. Whether to return the last output\n in the output sequence, or the full sequence.", + "name": "return_sequences" + }, + { + "default": false, + "description": "Boolean. 
Whether to return the last state\n in addition to the output.", + "name": "return_state" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.", + "name": "go_backwards" + }, + { + "default": false, + "description": "Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.", + "name": "stateful" + }, + { + "default": false, + "description": "Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.", + "name": "unroll" + }, + { + "description": "A RNN cell instance. A RNN cell is a class that has:\n - a `call(input_at_t, states_at_t)` method, returning\n `(output_at_t, states_at_t_plus_1)`. The call method of the\n cell can also take the optional argument `constants`, see\n section \"Note on passing external constants\" below.\n - a `state_size` attribute. This can be a single integer\n (single state) in which case it is\n the size of the recurrent state\n (which should be the same as the size of the cell output).\n This can also be a list/tuple of integers\n (one size per state).\n - a `output_size` attribute. This can be a single integer or a\n TensorShape, which represent the shape of the output. 
For\n backward compatible reason, if this attribute is not available\n for the cell, the value will be inferred by the first element\n of the `state_size`.\n\n It is also possible for `cell` to be a list of RNN cell instances,\n in which cases the cells get stacked one after the other in the RNN,\n implementing an efficient stacked RNN.\n", + "name": "cell" + }, + { + "description": "dimensionality of the input (integer).\n This argument (or alternatively,\n the keyword argument `input_shape`)\n is required when using this layer as the first layer in a model.", + "name": "input_dim" + }, + { + "description": "Length of input sequences, to be specified\n when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n Note that if the recurrent layer is not the first layer\n in your model, you would need to specify the input length\n at the level of the first layer\n (e.g. via the `input_shape` argument)\n", + "name": "input_length" + } + ], + "category": "Layer", + "description": "Base class for recurrent layers.\n\n**Masking**\n\nThis layer supports masking for input data with a variable number\nof timesteps. To introduce masks to your data,\nuse an [Embedding](embeddings.md) layer with the `mask_zero` parameter\nset to `True`.\n\n**Note on using statefulness in RNNs**\n\nYou can set RNN layers to be 'stateful', which means that the states\ncomputed for the samples in one batch will be reused as initial states\nfor the samples in the next batch. 
This assumes a one-to-one mapping\nbetween samples in different successive batches.\n\nTo enable statefulness:\n- specify `stateful=True` in the layer constructor.\n- specify a fixed batch size for your model, by passing\nif sequential model:\n`batch_input_shape=(...)` to the first layer in your model.\nelse for functional model with 1 or more Input layers:\n`batch_shape=(...)` to all the first layers in your model.\nThis is the expected shape of your inputs\n*including the batch size*.\nIt should be a tuple of integers, e.g. `(32, 10, 100)`.\n- specify `shuffle=False` when calling fit().\n\nTo reset the states of your model, call `.reset_states()` on either\na specific layer, or on your entire model.\n\n**Note on specifying the initial state of RNNs**\n\nYou can specify the initial state of RNN layers symbolically by\ncalling them with the keyword argument `initial_state`. The value of\n`initial_state` should be a tensor or list of tensors representing\nthe initial state of the RNN layer.\n\nYou can specify the initial state of RNN layers numerically by\ncalling `reset_states` with the keyword argument `states`. The value of\n`states` should be a numpy array or list of numpy arrays representing\nthe initial state of the RNN layer.\n\n**Note on passing external constants to RNNs**\n\nYou can pass \"external\" constants to the cell using the `constants`\nkeyword argument of `RNN.__call__` (as well as `RNN.call`) method. This\nrequires that the `cell.call` method accepts the same keyword argument\n`constants`. Such constants can be used to condition the cell\ntransformation on additional static inputs (not changing over time),\na.k.a. 
an attention mechanism.\n", + "examples": [ + { + "code": "# First, let's define a RNN Cell, as a layer subclass.\n\nclass MinimalRNNCell(keras.layers.Layer):\n\n def __init__(self, units, **kwargs):\n self.units = units\n self.state_size = units\n super(MinimalRNNCell, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.kernel = self.add_weight(shape=(input_shape[-1], self.units),\n initializer='uniform',\n name='kernel')\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units),\n initializer='uniform',\n name='recurrent_kernel')\n self.built = True\n\n def call(self, inputs, states):\n prev_output = states[0]\n h = K.dot(inputs, self.kernel)\n output = h + K.dot(prev_output, self.recurrent_kernel)\n return output, [output]\n\n# Let's use this cell in a RNN layer:\n\ncell = MinimalRNNCell(32)\nx = keras.Input((None, 5))\nlayer = RNN(cell)\ny = layer(x)\n\n# Here's how to use the cell to build a stacked RNN:\n\ncells = [MinimalRNNCell(32), MinimalRNNCell(64)]\nx = keras.Input((None, 5))\nlayer = RNN(cells)\ny = layer(x)" + } + ], + "inputs": [ + { + "description": "\n3D tensor with shape `(batch_size, timesteps, input_dim)`.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n- if `return_state`: a list of tensors. The first tensor is\n the output. The remaining tensors are the last states,\n each with shape `(batch_size, units)`. 
For example, the number of\n state tensors is 1 (for RNN and GRU) or 2 (for LSTM).\n- if `return_sequences`: 3D tensor with shape\n `(batch_size, timesteps, units)`.\n- else, 2D tensor with shape `(batch_size, units)`.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "SimpleRNNCell", + "schema": { + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see 
[regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.", + "name": "dropout" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n", + "name": "recurrent_dropout" + }, + { + "description": "hyperbolic tangent (`tanh`).\nIf you pass `None`, no activation is applied\n(ie. \"linear\" activation: `a(x) = x`).", + "name": "Default" + } + ], + "description": "Cell class for SimpleRNN.\n", + "package": "keras.layers" + } + }, + { + "name": "GRUCell", + "schema": { + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Activation function to use\n for the recurrent step\n (see [activations](https://keras.io/activations)).\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "default": 0.0, + 
"description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.", + "name": "dropout" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.", + "name": "recurrent_dropout" + }, + { + "description": "Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.", + "name": "implementation" + }, + { + "description": "hard sigmoid (`hard_sigmoid`).\nIf you pass `None`, no activation is applied\n(ie. \"linear\" activation: `a(x) = x`).", + "name": "Default" + }, + { + "description": "GRU convention (whether to apply reset gate after or\n before matrix multiplication). False = \"before\" (default),\n True = \"after\" (CuDNN compatible).\n", + "name": "reset_after" + } + ], + "description": "Cell class for the GRU layer.\n", + "package": "keras.layers" + } + }, + { + "name": "LSTMCell", + "schema": { + "attributes": [ + { + "description": "Positive integer, dimensionality of the output space.", + "name": "units" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n Default: hyperbolic tangent (`tanh`).\n If you pass `None`, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "description": "Activation function to use\n for the recurrent step\n (see [activations](https://keras.io/activations)).\n Default: sigmoid (`sigmoid`).\n If you pass `None`, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).x", + "name": "recurrent_activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias" + }, + { + "description": "Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer" + }, + { + "description": "Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state\n (see [initializers](https://keras.io/initializers)).", + "name": "recurrent_initializer" + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer" + }, + { + "description": "Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al. (2015)](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).", + "name": "unit_forget_bias" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer" + }, + { + "description": "Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "recurrent_regularizer" + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer" + }, + { + "description": "Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "recurrent_constraint" + }, + { + 
"description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).", + "name": "bias_constraint" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.", + "name": "dropout" + }, + { + "default": 0.0, + "description": "Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.", + "name": "recurrent_dropout" + }, + { + "description": "Implementation mode, either 1 or 2.\n Mode 1 will structure its operations as a larger number of\n smaller dot products and additions, whereas mode 2 will\n batch them into fewer, larger operations. These modes will\n have different performance profiles on different hardware and\n for different applications.\n", + "name": "implementation" + } + ], + "description": "Cell class for the LSTM layer.\n", + "package": "keras.layers" + } + }, + { + "name": "StackedRNNCells", + "schema": { + "attributes": [ + { + "description": "List of RNN cell instances.\n", + "name": "cells" + } + ], + "description": "Wrapper allowing a stack of RNN cells to behave as a single cell.\n\nUsed to implement efficient stacked RNNs.\n", + "examples": [ + { + "code": "cells = [\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n keras.layers.LSTMCell(output_dim),\n]\n\ninputs = keras.Input((timesteps, input_dim))\nx = keras.layers.RNN(cells)(inputs)" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Conv1D", + "schema": { + "attributes": [ + { + "default": "linear", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "valid", + "description": "One of `\"valid\"`, `\"causal\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means \"no padding\".\n `\"same\"` results in padding the input such that\n the output has the same length as the original input.\n `\"causal\"` results in causal (dilated) convolutions,\n e.g. `output[t]` does not depend on `input[t + 1:]`.\n A zero padding is used such that\n the output has the same length as the original input.\n Useful when modeling temporal data where the model\n should not violate the temporal order. See\n [WaveNet: A Generative Model for Raw Audio, section 2.1](\n https://arxiv.org/abs/1609.03499).", + "name": "padding" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, steps, channels)`\n (default format for temporal data in Keras)\n while `\"channels_first\"` corresponds to inputs\n with shape `(batch, channels, steps)`.", + "name": "data_format" + }, + { + "default": [ + 1 + ], + "description": "An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.", + "name": "strides" + }, + { + "default": [ + 1 + ], + "description": "an integer or tuple/list of a single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.", + "name": "dilation_rate" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n 
(see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.", + "name": "kernel_size" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + } + ], + "category": "Layer", + "description": "1D convolution layer (e.g. 
temporal convolution).\n\nThis layer creates a convolution kernel that is convolved\nwith the layer input over a single spatial (or temporal) dimension\nto produce a tensor of outputs.\nIf `use_bias` is True, a bias vector is created and added to the outputs.\nFinally, if `activation` is not `None`,\nit is applied to the outputs as well.\n\nWhen using this layer as the first layer in a model,\nprovide an `input_shape` argument (tuple of integers or `None`, does not\ninclude the batch axis), e.g. `input_shape=(10, 128)` for time series\nsequences of 10 time steps with 128 features per step in\n`data_format=\"channels_last\"`, or `(None, 128)` for variable-length\nsequences with 128 features per step.\n", + "inputs": [ + { + "description": "\n3D tensor with shape: `(batch, steps, channels)`\n", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "\n3D tensor with shape: `(batch, new_steps, filters)`\n`steps` value might have changed due to padding or strides.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Conv2D", + "schema": { + "attributes": [ + { + "default": "linear", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "valid", + "description": "one of `\"valid\"` or `\"same\"` (case-insensitive).\n Note that `\"same\"` is slightly inconsistent across backends with\n `strides` != 1, as described\n [here](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860)", + "name": "padding" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "description": "An integer or tuple/list of 2 integers,\n specifying the strides of the convolution\n along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.", + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "description": "an integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.", + "name": "dilation_rate" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": 
"bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.", + "name": "kernel_size" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint", + "visible": false + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint", + "visible": false + } + ], + "category": "Layer", + "description": "2D convolution layer (e.g. spatial convolution over images).\n\nThis layer creates a convolution kernel that is convolved\nwith the layer input to produce a tensor of\noutputs. 
If `use_bias` is True,\na bias vector is created and added to the outputs. Finally, if\n`activation` is not `None`, it is applied to the outputs as well.\n\nWhen using this layer as the first layer in a model,\nprovide the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis),\ne.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\nin `data_format=\"channels_last\"`.\n", + "inputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, channels, rows, cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, rows, cols, channels)`\nif `data_format` is `\"channels_last\"`.\n", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, filters, new_rows, new_cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, new_rows, new_cols, filters)`\nif `data_format` is `\"channels_last\"`.\n`rows` and `cols` values might have changed due to padding.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Conv3D", + "schema": { + "attributes": [ + { + "description": "Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of 3 integers, specifying the\n depth, height and width of the 3D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.", + "name": "kernel_size" + }, + { + "description": "An integer or tuple/list of 3 integers,\n specifying the strides of the convolution along each spatial dimension.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.", + "name": "strides" + }, + { + "description": "one of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".", + "name": "data_format" + }, + { + "description": "an integer or tuple/list of 3 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.", + "name": "dilation_rate" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + } + ], + "category": "Layer", + "description": "3D convolution layer (e.g. spatial convolution over volumes).\n\nThis layer creates a convolution kernel that is convolved\nwith the layer input to produce a tensor of\noutputs. If `use_bias` is True,\na bias vector is created and added to the outputs. Finally, if\n`activation` is not `None`, it is applied to the outputs as well.\n\nWhen using this layer as the first layer in a model,\nprovide the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis),\ne.g. 
`input_shape=(128, 128, 128, 1)` for 128x128x128 volumes\nwith a single channel,\nin `data_format=\"channels_last\"`.\n", + "inputs": [ + { + "description": "\n5D tensor with shape:\n`(batch, channels, conv_dim1, conv_dim2, conv_dim3)`\nif `data_format` is `\"channels_first\"`\nor 5D tensor with shape:\n`(batch, conv_dim1, conv_dim2, conv_dim3, channels)`\nif `data_format` is `\"channels_last\"`.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n5D tensor with shape:\n`(batch, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)`\nif `data_format` is `\"channels_first\"`\nor 5D tensor with shape:\n`(batch, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)`\nif `data_format` is `\"channels_last\"`.\n`new_conv_dim1`, `new_conv_dim2` and `new_conv_dim3` values might have\nchanged due to padding.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Conv2DTranspose", + "schema": { + "attributes": [ + { + "description": "Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.", + "name": "kernel_size" + }, + { + "description": "An integer or tuple/list of 2 integers,\n specifying the strides of the convolution\n along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.", + "name": "strides" + }, + { + "description": "one of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".", + "name": "data_format" + }, + { + "description": "an integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.", + "name": "dilation_rate" + }, + { + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + }, + { + "description": "An integer or tuple/list of 2 integers,\n specifying the amount of padding along the height and width\n of the output tensor.\n Can be a single integer to specify the same value for all\n spatial dimensions.\n The amount of output padding along a given dimension must be\n lower than the stride along that 
same dimension.\n If set to `None` (default), the output shape is inferred.", + "name": "output_padding" + } + ], + "category": "Layer", + "description": "Transposed convolution layer (sometimes called Deconvolution).\n\nThe need for transposed convolutions generally arises\nfrom the desire to use a transformation going in the opposite direction\nof a normal convolution, i.e., from something that has the shape of the\noutput of some convolution to something that has the shape of its input\nwhile maintaining a connectivity pattern that is compatible with\nsaid convolution.\n\nWhen using this layer as the first layer in a model,\nprovide the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis),\ne.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\nin `data_format=\"channels_last\"`.\n", + "inputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, channels, rows, cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, rows, cols, channels)`\nif `data_format` is `\"channels_last\"`.\n", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, filters, new_rows, new_cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, new_rows, new_cols, filters)`\nif `data_format` is `\"channels_last\"`.\n`rows` and `cols` values might have changed due to padding.\nIf `output_padding` is specified:\n\n```\nnew_rows = ((rows - 1) * strides[0] + kernel_size[0]\n - 2 * padding[0] + output_padding[0])\nnew_cols = ((cols - 1) * strides[1] + kernel_size[1]\n - 2 * padding[1] + output_padding[1])\n```\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[A guide to convolution arithmetic for deep learning]( https://arxiv.org/abs/1603.07285v1)" + }, + { + "description": "[Deconvolutional Networks]( 
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)" + } + ] + } + }, + { + "name": "Cropping1D", + "schema": { + "attributes": [ + { + "description": "int or tuple of int (length 2)\n How many units should be trimmed off at the beginning and end of\n the cropping dimension (axis 1).\n If a single int is provided,\n the same value will be used for both.\n", + "name": "cropping" + } + ], + "category": "Shape", + "description": "Cropping layer for 1D input (e.g. temporal sequence).\n\nIt crops along the time dimension (axis 1).\n", + "inputs": [ + { + "description": "\n3D tensor with shape `(batch, axis_to_crop, features)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n3D tensor with shape `(batch, cropped_axis, features)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Cropping2D", + "schema": { + "attributes": [ + { + "description": "int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to height and width.\n - If tuple of 2 ints:\n interpreted as two different\n symmetric cropping values for height and width:\n `(symmetric_height_crop, symmetric_width_crop)`.\n - If tuple of 2 tuples of 2 ints:\n interpreted as\n `((top_crop, bottom_crop), (left_crop, right_crop))`", + "name": "cropping" + }, + { + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n", + "name": "data_format" + } + ], + "category": "Shape", + "description": "Cropping layer for 2D input (e.g. 
picture).\n\nIt crops along spatial dimensions, i.e. height and width.\n", + "examples": [ + { + "code": "# Crop the input 2D images or feature maps\nmodel = Sequential()\nmodel.add(Cropping2D(cropping=((2, 2), (4, 4)),\n input_shape=(28, 28, 3)))\n# now model.output_shape == (None, 24, 20, 3)\nmodel.add(Conv2D(64, (3, 3), padding='same'))\nmodel.add(Cropping2D(cropping=((2, 2), (2, 2))))\n# now model.output_shape == (None, 20, 16, 64)" + } + ], + "inputs": [ + { + "description": "\n4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, rows, cols, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, rows, cols)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, cropped_rows, cropped_cols, channels)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, channels, cropped_rows, cropped_cols)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Cropping3D", + "schema": { + "attributes": [ + { + "description": "int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric cropping\n is applied to depth, height, and width.\n - If tuple of 3 ints:\n interpreted as three different\n symmetric cropping values for depth, height, and width:\n `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.\n - If tuple of 3 tuples of 2 ints:\n interpreted as\n `((left_dim1_crop, right_dim1_crop),\n (left_dim2_crop, right_dim2_crop),\n (left_dim3_crop, right_dim3_crop))`", + "name": "cropping" + }, + { + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n 
It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n", + "name": "data_format" + } + ], + "category": "Shape", + "description": "Cropping layer for 3D data (e.g. spatial or spatio-temporal).\n", + "inputs": [ + { + "description": "\n5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,\n depth)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, depth,\n first_axis_to_crop, second_axis_to_crop, third_axis_to_crop)`\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n5D tensor with shape:\n- If `data_format` is `\"channels_last\"`:\n `(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis,\n depth)`\n- If `data_format` is `\"channels_first\"`:\n `(batch, depth,\n first_cropped_axis, second_cropped_axis, third_cropped_axis)`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "SeparableConv2D", + "schema": { + "attributes": [ + { + "default": "linear", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "valid", + "description": "one of `\"valid\"` or `\"same\"` (case-insensitive).", + "name": "padding" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "description": "An integer or tuple/list of 2 integers,\n specifying the strides of the convolution\n along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.", + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "description": "An integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.", + "name": "dilation_rate" + }, + { + "default": 1, + "description": "The number of depthwise convolution output channels\n for each input channel.\n The total number of depthwise convolution output\n channels will be equal to `filters_in * depth_multiplier`.", + "name": "depth_multiplier" + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": 
"Initializer for the pointwise kernel matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "pointwise_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the depthwise kernel matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "depthwise_initializer", + "visible": false + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n (see [initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.", + "name": "kernel_size" + }, + { + "description": "Regularizer function applied to\n the depthwise kernel matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "depthwise_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the pointwise kernel matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "pointwise_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the depthwise kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "depthwise_constraint", + "visible": false + }, + { + "description": "Constraint function applied to\n the pointwise kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "pointwise_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + } + ], + "category": "Layer", + "description": "Depthwise separable 2D convolution.\n\nSeparable convolution performs first\na depthwise spatial convolution\n(which acts on each input channel separately)\nfollowed by a pointwise convolution which mixes together the resulting\noutput channels. 
The `depth_multiplier` argument controls how many\noutput channels are generated per input channel in the depthwise step.\n\nIntuitively, separable convolutions can be understood as\na way to factorize a convolution kernel into two smaller kernels,\nor as an extreme version of an Inception block.\n", + "inputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, channels, rows, cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, rows, cols, channels)`\nif `data_format` is `\"channels_last\"`.\n", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, filters, new_rows, new_cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, new_rows, new_cols, filters)`\nif `data_format` is `\"channels_last\"`.\n`rows` and `cols` values might have changed due to padding.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Convolution2D", + "schema": { + "attributes": [ + { + "default": "linear", + "description": "Activation function to use\n (see [activations](https://keras.io/activations)).\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).", + "name": "activation" + }, + { + "default": "valid", + "description": "one of `\"valid\"` or `\"same\"` (case-insensitive).\n Note that `\"same\"` is slightly inconsistent across backends with\n `strides` != 1, as described\n [here](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860)", + "name": "padding" + }, + { + "default": true, + "description": "Boolean, whether the layer uses a bias vector.", + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "description": "A string,\n one of `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "description": "An integer or tuple/list of 2 integers,\n specifying the strides of the convolution\n along the height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.", + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "description": "an integer or tuple/list of 2 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.", + "name": "dilation_rate" + }, + { + "default": 1, + "name": "depth_multiplier" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "description": "Initializer for the bias vector\n (see 
[initializers](https://keras.io/initializers)).", + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "description": "Initializer for the `kernel` weights matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "kernel_initializer", + "visible": false + }, + { + "description": "Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).", + "name": "filters" + }, + { + "description": "An integer or tuple/list of 2 integers, specifying the\n height and width of the 2D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.", + "name": "kernel_size" + }, + { + "description": "Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "kernel_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to the bias vector\n (see [regularizer](https://keras.io/regularizers)).", + "name": "bias_regularizer", + "visible": false + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to the kernel matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "kernel_constraint" + }, + { + "description": "Constraint function applied to the bias vector\n (see [constraints](https://keras.io/constraints)).\n", + "name": "bias_constraint" + } + ], + "category": "Layer", + "description": "2D convolution layer (e.g. spatial convolution over images).\n\nThis layer creates a convolution kernel that is convolved\nwith the layer input to produce a tensor of\noutputs. 
If `use_bias` is True,\na bias vector is created and added to the outputs. Finally, if\n`activation` is not `None`, it is applied to the outputs as well.\n\nWhen using this layer as the first layer in a model,\nprovide the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis),\ne.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures\nin `data_format=\"channels_last\"`.\n", + "inputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, channels, rows, cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, rows, cols, channels)`\nif `data_format` is `\"channels_last\"`.\n", + "name": "input" + }, + { + "name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "description": "\n4D tensor with shape:\n`(batch, filters, new_rows, new_cols)`\nif `data_format` is `\"channels_first\"`\nor 4D tensor with shape:\n`(batch, new_rows, new_cols, filters)`\nif `data_format` is `\"channels_last\"`.\n`rows` and `cols` values might have changed due to padding.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "DepthwiseConv2D", + "schema": { + "attributes": [ + { + "default": "linear", + "name": "activation" + }, + { + "default": "valid", + "name": "padding" + }, + { + "default": true, + "name": "use_bias", + "visible": false + }, + { + "default": "channels_last", + "name": "data_format" + }, + { + "default": [ + 1, + 1 + ], + "name": "strides" + }, + { + "default": [ + 1, + 1 + ], + "name": "dilation_rate" + }, + { + "default": { + "class_name": "Zeros", + "config": {} + }, + "name": "bias_initializer", + "visible": false + }, + { + "default": { + "class_name": "VarianceScaling", + "config": { + "distribution": "uniform", + "mode": "fan_avg", + "scale": 1, + "seed": null + } + }, + "name": "depthwise_initializer", + "visible": false + }, + { + "default": 1, + "name": "depth_multiplier" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + 
"name": "kernel" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "Concatenate", + "schema": { + "attributes": [ + { + "description": "Axis along which to concatenate.", + "name": "axis" + }, + { + "description": "standard layer keyword arguments.\n", + "name": "**kwargs" + } + ], + "category": "Tensor", + "description": "Layer that concatenates a list of inputs.\n\nIt takes as input a list of tensors,\nall of the same shape except for the concatenation axis,\nand returns a single tensor, the concatenation of all inputs.\n", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Average", + "schema": { + "category": "Tensor", + "description": "Layer that averages a list of inputs.\n\nIt takes as input a list of tensors,\nall of the same shape, and returns\na single tensor (also of the same shape).\n", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Maximum", + "schema": { + "category": "Tensor", + "description": "Layer that computes the maximum (element-wise) a list of inputs.\n\nIt takes as input a list of tensors,\nall of the same shape, and returns\na single tensor (also of the same shape).\n", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Dot", + "schema": { + "attributes": [ + { + "description": "Integer or tuple of integers,\n axis or axes along which to take the dot product.", + "name": "axes" + }, + { + "description": "Whether to L2-normalize samples along the\n dot product axis before taking the dot product.\n If set to True, then the output of the dot product\n is the cosine proximity between the two samples.", + "name": "normalize" + }, + { + 
"description": "Standard layer keyword arguments.\n", + "name": "**kwargs" + } + ], + "description": "Layer that computes a dot product between samples in two tensors.\n\nE.g. if applied to a list of two tensors `a` and `b` of shape `(batch_size, n)`,\nthe output will be a tensor of shape `(batch_size, 1)`\nwhere each entry `i` will be the dot product between\n`a[i]` and `b[i]`.\n", + "inputs": [ + { + "name": "x" + }, + { + "name": "y" + } + ], + "outputs": [ + { + "name": "z" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Flatten", + "schema": { + "attributes": [ + { + "default": "channels_last", + "description": "A string,\n one of `'channels_last'` (default) or `'channels_first'`.\n The ordering of the dimensions in the inputs.\n The purpose of this argument is to preserve weight\n ordering when switching a model from one data format\n to another.\n `'channels_last'` corresponds to inputs with shape\n `(batch, ..., channels)` while `'channels_first'` corresponds to\n inputs with shape `(batch, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `'channels_last'`.\n", + "name": "data_format" + } + ], + "category": "Shape", + "description": "Flattens the input. Does not affect the batch size.\n", + "examples": [ + { + "code": "model = Sequential()\nmodel.add(Conv2D(64, (3, 3),\n input_shape=(3, 32, 32), padding='same',))\n# now: model.output_shape == (None, 64, 32, 32)\n\nmodel.add(Flatten())\n# now: model.output_shape == (None, 65536)" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Reshape", + "schema": { + "attributes": [ + { + "description": "target shape. 
Tuple of integers.\n Does not include the batch axis.\n", + "name": "target_shape" + } + ], + "category": "Shape", + "description": "Reshapes an output to a certain shape.\n", + "examples": [ + { + "code": "# as first layer in a Sequential model\nmodel = Sequential()\nmodel.add(Reshape((3, 4), input_shape=(12,)))\n# now: model.output_shape == (None, 3, 4)\n# note: `None` is the batch dimension\n\n# as intermediate layer in a Sequential model\nmodel.add(Reshape((6, 2)))\n# now: model.output_shape == (None, 6, 2)\n\n# also supports shape inference using `-1` as dimension\nmodel.add(Reshape((-1, 2, 2)))\n# now: model.output_shape == (None, 3, 2, 2)" + } + ], + "inputs": [ + { + "description": "\nArbitrary, although all dimensions in the input shaped must be fixed.\nUse the keyword argument `input_shape`\n(tuple of integers, does not include the batch axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n`(batch_size,) + target_shape`\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Permute", + "schema": { + "attributes": [ + { + "description": "Tuple of integers. Permutation pattern, does not include the\n samples dimension. Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimension\n of the input.\n", + "name": "dims" + } + ], + "category": "Shape", + "description": "Permutes the dimensions of the input according to a given pattern.\n\nUseful for e.g. connecting RNNs and convnets together.\n", + "examples": [ + { + "code": "model = Sequential()\nmodel.add(Permute((2, 1), input_shape=(10, 64)))\n# now: model.output_shape == (None, 64, 10)\n# note: `None` is the batch dimension" + } + ], + "inputs": [ + { + "description": "\nArbitrary. 
Use the keyword argument `input_shape`\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\nSame as the input shape, but with the dimensions re-ordered according\nto the specified pattern.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "RepeatVector", + "schema": { + "attributes": [ + { + "description": "integer, repetition factor.\n", + "name": "n" + } + ], + "category": "Shape", + "description": "Repeats the input n times.\n", + "examples": [ + { + "code": "model = Sequential()\nmodel.add(Dense(32, input_dim=32))\n# now: model.output_shape == (None, 32)\n# note: `None` is the batch dimension\n\nmodel.add(RepeatVector(3))\n# now: model.output_shape == (None, 3, 32)" + } + ], + "inputs": [ + { + "description": "\n2D tensor of shape `(num_samples, features)`.\n", + "name": "input" + } + ], + "outputs": [ + { + "description": "\n3D tensor of shape `(num_samples, n, features)`.\n", + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Dropout", + "schema": { + "attributes": [ + { + "description": "float between 0 and 1. 
Fraction of the input units to drop.", + "name": "rate" + }, + { + "description": "1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.", + "name": "noise_shape" + }, + { + "description": "A Python integer to use as random seed.\n", + "name": "seed" + } + ], + "category": "Dropout", + "description": "Applies Dropout to the input.\n\nDropout consists in randomly setting\na fraction `rate` of input units to 0 at each update during training time,\nwhich helps prevent overfitting.\n", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[Dropout: A Simple Way to Prevent Neural Networks from Overfitting]( http://www.jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)" + } + ] + } + }, + { + "name": "Embedding", + "schema": { + "attributes": [ + { + "default": false, + "description": "Whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n This is useful when using [recurrent layers](https://keras.io/layers/recurrent)\n which may take variable length input.\n If this is `True` then all subsequent layers\n in the model need to support masking or an exception will be raised.\n If mask_zero is set to True, as a consequence, index 0 cannot be\n used in the vocabulary (input_dim should equal size of\n vocabulary + 1).", + "name": "mask_zero" + }, + { + "default": { + "class_name": "RandomUniform", + "config": { + "maxval": 0.05, + "minval": -0.05, + "seed": null + } + }, + "description": "Initializer for the `embeddings` matrix\n (see [initializers](https://keras.io/initializers)).", + "name": "embeddings_initializer", + "visible": false + }, + { + "description": "int > 0. 
Size of the vocabulary,\n i.e. maximum integer index + 1.", + "name": "input_dim" + }, + { + "description": "int >= 0. Dimension of the dense embedding.", + "name": "output_dim" + }, + { + "description": "Regularizer function applied to\n the `embeddings` matrix\n (see [regularizer](https://keras.io/regularizers)).", + "name": "embeddings_regularizer", + "visible": false + }, + { + "description": "Constraint function applied to\n the `embeddings` matrix\n (see [constraints](https://keras.io/constraints)).", + "name": "embeddings_constraint" + }, + { + "description": "Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n", + "name": "input_length" + }, + { + "description": "Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](https://keras.io/regularizers)).", + "name": "activity_regularizer" + } + ], + "category": "Transform", + "description": "Turns positive integers (indexes) into dense vectors of fixed size.\neg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]\n\nThis layer can only be used as the first layer in a model.\n", + "examples": [ + { + "code": "model = Sequential()\nmodel.add(Embedding(1000, 64, input_length=10))\n# the model will take as input an integer matrix of size (batch, input_length).\n# the largest integer (i.e. 
word index) in the input should be\n# no larger than 999 (vocabulary size).\n# now model.output_shape == (None, 10, 64), where None is the batch dimension.\n\ninput_array = np.random.randint(1000, size=(32, 10))\n\nmodel.compile('rmsprop', 'mse')\noutput_array = model.predict(input_array)\nassert output_array.shape == (32, 10, 64)" + } + ], + "inputs": [ + { + "description": "\n2D tensor with shape: `(batch_size, sequence_length)`.\n", + "name": "input" + }, + { + "name": "embeddings" + } + ], + "outputs": [ + { + "description": "\n3D tensor with shape: `(batch_size, sequence_length, output_dim)`.\n", + "name": "output" + } + ], + "package": "keras.layers", + "references": [ + { + "description": "[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)" + } + ] + } + }, + { + "name": "Add", + "schema": { + "description": "Layer that adds a list of inputs.\n\nIt takes as input a list of tensors,\nall of the same shape, and returns\na single tensor (also of the same shape).\n", + "examples": [ + { + "code": "import keras\n\ninput1 = keras.layers.Input(shape=(16,))\nx1 = keras.layers.Dense(8, activation='relu')(input1)\ninput2 = keras.layers.Input(shape=(32,))\nx2 = keras.layers.Dense(8, activation='relu')(input2)\n# equivalent to added = keras.layers.add([x1, x2])\nadded = keras.layers.Add()([x1, x2])\n\nout = keras.layers.Dense(4)(added)\nmodel = keras.models.Model(inputs=[input1, input2], outputs=out)" + } + ], + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Subtract", + "schema": { + "description": "Layer that subtracts two inputs.\n\nIt takes as input a list of tensors of size 2,\nboth of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),\nalso of the same shape.\n", + "examples": [ + { + "code": "import keras\n\ninput1 = keras.layers.Input(shape=(16,))\nx1 = 
keras.layers.Dense(8, activation='relu')(input1)\ninput2 = keras.layers.Input(shape=(32,))\nx2 = keras.layers.Dense(8, activation='relu')(input2)\n# Equivalent to subtracted = keras.layers.subtract([x1, x2])\nsubtracted = keras.layers.Subtract()([x1, x2])\n\nout = keras.layers.Dense(4)(subtracted)\nmodel = keras.models.Model(inputs=[input1, input2], outputs=out)" + } + ], + "inputs": [ + { + "name": "x" + }, + { + "name": "y" + } + ], + "outputs": [ + { + "name": "z" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Multiply", + "schema": { + "description": "Layer that multiplies (element-wise) a list of inputs.\n\nIt takes as input a list of tensors,\nall of the same shape, and returns\na single tensor (also of the same shape).\n", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { + "name": "output" + } + ], + "package": "keras.layers" + } + }, + { + "name": "Lambda", + "schema": { + "attributes": [ + { + "description": "The function to be evaluated.\n Takes input tensor or list of tensors as first argument.", + "name": "function" + }, + { + "description": "Expected output shape from function.\n Only relevant when using Theano.\n Can be a tuple or function.\n If a tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input:\n `output_shape = (input_shape[0], ) + output_shape`\n or, the input is `None` and\n the sample dimension is also `None`:\n `output_shape = (None, ) + output_shape`\n If a function, it specifies the entire shape as a function of the\n input shape: `output_shape = f(input_shape)`", + "name": "output_shape" + }, + { + "description": "optional dictionary of keyword arguments to be passed\n to the function.\n", + "name": "arguments" + }, + { + "description": "Either None (indicating no masking) or a Tensor indicating the\n input mask for Embedding.", + "name": "mask" + } + ], + "description": "Wraps arbitrary expression as a `Layer` object.\n", + 
"examples": [ + { + "code": "# add a x -> x^2 layer\nmodel.add(Lambda(lambda x: x ** 2))" + }, + { + "code": "# add a layer that returns the concatenation\n# of the positive part of the input and\n# the opposite of the negative part\n\ndef antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\ndef antirectifier_output_shape(input_shape):\n shape = list(input_shape)\n assert len(shape) == 2 # only valid for 2D tensors\n shape[-1] *= 2\n return tuple(shape)\n\nmodel.add(Lambda(antirectifier,\n output_shape=antirectifier_output_shape))" + }, + { + "code": "# add a layer that returns the hadamard product\n# and sum of it from two input tensors\n\ndef hadamard_product_sum(tensors):\n out1 = tensors[0] * tensors[1]\n out2 = K.sum(out1, axis=-1)\n return [out1, out2]\n\ndef hadamard_product_sum_output_shape(input_shapes):\n shape1 = list(input_shapes[0])\n shape2 = list(input_shapes[1])\n assert shape1 == shape2 # else hadamard product isn't possible\n return [tuple(shape1), tuple(shape2[:-1])]\n\nx1 = Dense(32)(input_1)\nx2 = Dense(32)(input_2)\nlayer = Lambda(hadamard_product_sum, hadamard_product_sum_output_shape)\nx_hadamard, x_sum = layer([x1, x2])" + } + ], + "inputs": [ + { + "description": "\nArbitrary. 
Use the keyword argument input_shape\n(tuple of integers, does not include the samples axis)\nwhen using this layer as the first layer in a model.\n", + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { + "description": "\nSpecified by `output_shape` argument\n(or auto-inferred when using TensorFlow or CNTK).\n", + "name": "output" + } + ], + "package": "keras.layers" + } + } +] diff --git a/frontend/packages/core/public/netron/keras.js b/frontend/packages/core/public/netron/keras.js new file mode 100644 index 00000000..d1873f2a --- /dev/null +++ b/frontend/packages/core/public/netron/keras.js @@ -0,0 +1,1308 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var keras = keras || {}; +var base = base || require('./base'); + +keras.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'h5' || extension === 'hd5' || extension === 'hdf5' || extension === 'keras' || extension === 'model' || extension == 'pb' || extension == 'pth') { + const buffer = context.buffer; + const signature = [ 0x89, 0x48, 0x44, 0x46, 0x0D, 0x0A, 0x1A, 0x0A ]; + return buffer && buffer.length > signature.length && signature.every((v, i) => v === buffer[i]); + } + if (extension == 'json' && !identifier.endsWith('-symbol.json')) { + const json = context.text; + if (json.indexOf('"mxnet_version":', 0) == -1) { + try { + let root = keras.JsonParser.parse(json); + if (root && root.nodes && root.arg_nodes && root.heads) { + return false; + } + if (root && root.modelTopology) { + root = root.modelTopology; + } + if (root && root.model_config) { + root = root.model_config; + } + if (root && root.class_name) { + return true; + } + } + catch (err) { + // continue regardless of error + } + } + } + return false; + } + + open(context, host) { + return host.require('./hdf5').then((hdf5) => { + let format = 'Keras'; + let producer = ''; 
+ let version = ''; + let backend = ''; + let model_config = null; + let rootGroup = null; + let weightsManifest = null; + const identifier = context.identifier; + try { + switch (identifier.split('.').pop().toLowerCase()) { + case 'keras': + case 'h5': + case 'hd5': + case 'hdf5': + case 'model': + case 'pb': + case 'pth': { + const file = new hdf5.File(context.buffer); + rootGroup = file.rootGroup; + if (!rootGroup.attribute('model_config') && !rootGroup.attribute('layer_names')) { + throw new keras.Error("File format is not Keras HDF5."); + } + const json = rootGroup.attribute('model_config'); + if (json) { + model_config = keras.JsonParser.parse(json); + } + backend = rootGroup.attribute('backend') || ''; + version = rootGroup.attribute('keras_version') || ''; + format = format + (version ? ' v' + version : ''); + break; + } + case 'json': { + model_config = keras.JsonParser.parse(context.text); + if (model_config.keras_version) { + version = model_config.keras_version; + format = format + (version ? (' v' + version) : ''); + } + if (model_config.backend) { + backend = model_config.backend; + } + if (model_config && model_config.modelTopology) { + weightsManifest = model_config.weightsManifest || null; + backend = model_config.modelTopology.backend; + version = model_config.modelTopology.keras_version; + format = format + (version ? (' v' + version) : ''); + format = 'TensorFlow.js ' + (model_config.format ? model_config.format : format); + producer = model_config.convertedBy || model_config.generatedBy || ''; + model_config = model_config.modelTopology; + } + if (model_config.model_config) { + model_config = model_config.model_config; + } + break; + } + } + } + catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new keras.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + + if (!rootGroup && !model_config) { + throw new keras.Error('\'model_config\' is not present.'); + } + if (!rootGroup && !model_config.class_name) { + throw new keras.Error('\'class_name\' is not present.'); + } + + return keras.Metadata.open(host).then((metadata) => { + try { + return new keras.Model(metadata, format, producer, backend, model_config, rootGroup, weightsManifest); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new keras.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } +}; + +keras.Model = class { + + constructor(metadata, format, producer, backend, model_config, rootGroup, weightsManifest) { + this._format = format; + this._backend = backend; + this._producer = producer; + this._graphs = []; + + const weights = new keras.Weights(); + if (rootGroup) { + let model_weights_group = rootGroup.group('model_weights'); + if (!model_weights_group && rootGroup.attribute('layer_names')) { + model_weights_group = rootGroup; + } + if (model_weights_group) { + model_weights_group = new keras.Group(model_weights_group); + for (const layer_name of model_weights_group.attribute('layer_names')) { + const layer_weights = model_weights_group.group(layer_name); + if (layer_weights) { + const weight_names = layer_weights.attribute('weight_names'); + if (weight_names && weight_names.length > 0) { + for (const weight_name of weight_names) { + const weight = layer_weights.group(weight_name); + if (weight && weight.value) { + const variable = weight.value; + const tensor = new keras.Tensor(weight_name, variable.type, variable.shape, variable.littleEndian, variable.data, ''); + if (model_config) { + weights.add(layer_name, tensor); + } + else { + const components = weight_name.split('/'); + components.pop(); + const name = 
(components.length == 0 || components[0] !== layer_name) ? [ layer_name ].concat(components).join('/') : components.join('/'); + weights.add(name, tensor); + } + } + } + } + } + } + } + } + else if (weightsManifest) { + for (const manifest of weightsManifest) { + for (const weight of manifest.weights) { + const tensor = new keras.Tensor(weight.name, weight.dtype, weight.shape, false, null, manifest.paths.join(';')); + weights.add('', tensor); + } + } + } + + this._graphs = [ new keras.Graph(metadata, model_config, weights) ]; + } + + get name() { + return null; + } + + get description() { + return null; + } + + get format() { + return this._format; + } + + get producer() { + return this._producer; + } + + get runtime() { + return this._backend; + } + + get graphs() { + return this._graphs; + } +}; + +keras.Graph = class { + + constructor(metadata, model, weights) { + this._metadata = metadata; + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._groups = false; + + if (model) { + this._name = model.name || (model.config && model.config.name ? model.config.name : ''); + switch (model.class_name) { + case 'AllCNN': + case 'Sequential': + this._loadSequential(model.config, weights, '', null, null); + break; + case 'Model': + this._loadModel(model.config, weights, '', null, null); + break; + default: + throw new keras.Error('\'' + model.class_name + '\' is not supported.'); + } + } + else if (weights) { + for (const layer of weights.keys()) { + if (weights.get('', layer).length <= 6) { + const node = new keras.Node(metadata, 'Weights', { name: layer }, [], [], '', weights); + this._nodes.push(node); + } + } + } + } + + get name() { + return this._name; + } + + get groups() { + return this._groups ? 
true : false; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + _loadModel(config, weights, group, inputs, outputs) { + if (group) { + this._groups = true; + } + const nodeMap = new Map(); + if (config.layers) { + for (const layer of config.layers) { + if (layer.name) { + if (!nodeMap.has(layer.name)) { + nodeMap.set(layer.name, layer); + layer._inputs = []; + layer._outputs = []; + } + } + } + for (const layer of config.layers) { + if (layer.inbound_nodes) { + for (const inbound_node of layer.inbound_nodes) { + for (const inbound_connection of inbound_node) { + let inputName = inbound_connection[0]; + const inputNode = nodeMap.get(inputName); + if (inputNode) { + const inputIndex = inbound_connection[2]; + if (inputIndex != 0) { + inputName += ':' + inputIndex.toString(); + } + while (inputIndex >= inputNode._outputs.length) { + inputNode._outputs.push(''); + } + inputNode._outputs[inputIndex] = inputName; + } + layer._inputs.push(inputName); + } + } + } + } + } + const input_layers = config.input_layers; + if (input_layers) { + for (let i = 0; i < input_layers.length; i++) { + const input_layer = input_layers[i]; + const name = input_layer[0]; + let type = null; + const node = nodeMap.get(name); + if (node && node.class_name == 'InputLayer') { + type = this._getInputType(node); + nodeMap.delete(name); + } + if (inputs && i < inputs.length) { + if (config.layers) { + for (const layer of config.layers) { + if (layer._inputs) { + layer._inputs = layer._inputs.map((input) => { + return input === name ? 
inputs[i] : input; + }); + } + } + } + } + else { + this._inputs.push(new keras.Parameter(name, true, [ new keras.Argument(name, type, null) ])); + } + } + } + const inputMap = new Map(); + const output_layers = config.output_layers; + if (output_layers) { + for (let j = 0; j < output_layers.length; j++) { + const output_layer = output_layers[j]; + let outputName = output_layer[0]; + const outputNode = nodeMap.get(outputName); + let addGraphOutput = true; + if (outputs && j < outputs.length) { + inputMap.set(outputName, outputs[j]); + outputName = outputs[j]; + addGraphOutput = false; + } + if (outputNode) { + const outputIndex = output_layer[2]; + if (outputIndex != 0) { + outputName += ':' + outputIndex.toString(); + } + while (outputIndex >= outputNode._outputs.length) { + outputNode._outputs.push(''); + } + outputNode._outputs[outputIndex] = outputName; + } + if (addGraphOutput) { + this._outputs.push(new keras.Parameter(outputName, true, [ new keras.Argument(outputName, null, null) ])); + } + } + } + + if (config.layers) { + for (const layer of config.layers) { + if (nodeMap.has(layer.name)) { + this._loadNode(layer, layer._inputs, layer._outputs, weights, group, inputMap); + } + } + } + } + + _loadSequential(config, weights, group, inputs, outputs) { + if (group) { + this._groups = true; + } + const inputName = 'input'; + let inputType = null; + let argument = inputName; + let index = 0; + const layers = config.layers ? 
config.layers : config; + for (const layer of layers) { + let name = index.toString(); + let nodeInputs = [ argument ]; + if (index == 0) { + if (inputs && inputs.length > 0) { + nodeInputs = [ inputs[0] ]; + } + else { + inputType = this._getInputType(layer); + } + } + index++; + if (layer.config && layer.config.name) { + name = layer.config.name; + } + argument = name; + let nodeOutputs = [ argument ]; + if (index == layers.length) { + if (outputs && outputs.length > 0) { + nodeOutputs = [ outputs[0] ]; + argument = null; + } + } + + this._loadNode(layer, nodeInputs, nodeOutputs, weights, group); + } + if (!inputs) { + this._inputs.push(new keras.Parameter(inputName, true, [ new keras.Argument(inputName, inputType, null) ])); + } + if (argument) { + this._outputs.push(new keras.Parameter(argument, true, [ new keras.Argument(argument, null, null) ])); + } + } + + _loadNode(layer, inputs, outputs, weights, group, inputMap) { + const class_name = layer.class_name; + switch (class_name) { + case 'Sequential': { + const name = layer.name || (layer.config ? layer.config.name : ''); + this._loadSequential(layer.config, weights, (group ? group + '/' : '') + name, inputs, outputs); + break; + } + case 'Model': { + const name = layer.name || (layer.config ? layer.config.name : ''); + this._loadModel(layer.config, weights, (group ? group + '/' : '') + name, inputs, outputs); + break; + } + default: { + inputs = inputs.map((input) => inputMap && inputMap.has(input) ? inputMap.get(input) : input); + const node = new keras.Node(this._metadata, class_name, layer.config, inputs, outputs, group, weights); + this._nodes.push(node); + break; + } + } + } + + _getInputType(layer) { + if (layer && layer.config) { + let dataType = '?'; + let shape = []; + const config = layer.config; + if (config.dtype) { + dataType = config.dtype; + delete config.dtype; + } + if (config.batch_input_shape) { + shape = config.batch_input_shape.map(s => s == null ? '?' 
: s); + delete config.batch_input_shape; + } + return new keras.TensorType(dataType, new keras.TensorShape(shape)); + } + return null; + } +}; + +keras.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +keras.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new keras.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name= name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +keras.Node = class { + + constructor(metadata, type, config, inputs, outputs, group, weights) { + this._group = group || ''; + this._metadata = metadata; + this._type = type; + const name = config && config.name ? config.name : ''; + this._name = (this._group ? 
this._group + '/' : '') + name; + this._inputs = []; + this._outputs = []; + this._attributes = []; + + let names = [ name ]; + if ((type == 'Bidirectional' || type == 'TimeDistributed') && (config && config.layer)) { + const inner = config.layer; + delete config.layer; + this._inner = new keras.Node(this._metadata, inner.class_name, inner.config, [], [], null, null); + if (type == 'Bidirectional' && inner.config.name) { + names = [ name + '/forward_' + inner.config.name, name + '/backward_' + inner.config.name ]; + if (!group) { + group = name; + } + } + } + + const initializers = {}; + if (weights) { + for (const name of names) { + for (const initializer of weights.get(group, name)) { + inputs.push(initializer.name); + initializers[initializer.name] = initializer; + } + } + } + + if (config) { + for (const name of Object.keys(config)) { + const value = config[name]; + if (name != 'name' && value != null) { + this._attributes.push(new keras.Attribute(metadata.attribute(this.type, name), name, value)); + } + } + } + + const schema = this._metadata.type(this.type); + const innerType = this.inner ? this.inner.type : null; + const innerSchema = innerType ? this._metadata.type(innerType) : null; + let inputIndex = 0; + while (inputs.length > 0) { + let variadic = false; + let inputName = null; + let visible = true; + if (!innerSchema || inputIndex == 0) { + if (schema && schema.inputs && inputIndex < schema.inputs.length) { + const input = schema.inputs[inputIndex]; + inputName = input.name; + if (type === 'BatchNormalization' && inputName === 'gamma' && config.scale === false) { + inputIndex++; + continue; + } + visible = input.visible == false ? 
false : true; + if (schema.inputs[inputIndex].option == 'variadic') { + variadic = true; + } + } + } + else { + switch (type) { + case 'Bidirectional': { + let innerIndex = inputIndex; + if (innerSchema && innerSchema.inputs) { + if (innerIndex < innerSchema.inputs.length) { + inputName = 'forward_' + innerSchema.inputs[innerIndex].name; + } + else { + innerIndex = innerIndex - innerSchema.inputs.length + 1; + if (innerIndex < innerSchema.inputs.length) { + inputName = 'backward_' + innerSchema.inputs[innerIndex].name; + } + } + } + visible = false; + break; + } + case 'TimeDistributed': + if (innerSchema && innerSchema.inputs && inputIndex < innerSchema.inputs.length) { + inputName = innerSchema.inputs[inputIndex].name; + } + break; + } + } + const input = !variadic ? [ inputs.shift() ] : inputs.splice(0, inputs.length); + const inputArguments = input.map((id) => { + return new keras.Argument(id, null, initializers[id]); + }); + if (!inputName && inputArguments.length == 1 && inputArguments[0].initializer && inputArguments[0].initializer.name) { + const parts = inputArguments[0].initializer.name.split('/').pop().split(':').shift().split('_'); + const inputName1 = parts.pop(); + const inputName2 = parts.length > 0 ? [ parts.pop(), inputName1 ].join('_') : ''; + const inputNames = new Set([ 'recurrent_kernel', 'running_mean', 'running_std', 'moving_mean', 'moving_variance' ]); + inputName = inputNames.has(inputName2) ? inputName2 : inputName1; + } + this._inputs.push(new keras.Parameter(inputName || inputIndex.toString(), visible, inputArguments)); + inputIndex++; + } + + this._outputs = outputs.map((output, outputIndex) => { + const outputName = + (schema && schema.outputs && outputIndex < schema.outputs.length && schema.outputs[outputIndex] && schema.outputs[outputIndex].name) ? 
+ schema.outputs[outputIndex].name : + outputIndex.toString(); + return new keras.Parameter(outputName, true, [ new keras.Argument(output, null, null) ]); + }); + } + + get type() { + return this._type; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get name() { + return this._name; + } + + get group() { + return this._group; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } + + get inner() { + return this._inner; + } +}; + +keras.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + + if (typeof value == 'object' && value.class_name && value.config) { + this._value = keras.Attribute._convert(value); + } + + switch (name) { + case 'trainable': + this._type = 'boolean'; + this._visible = false; + break; + case 'dtype': + this._visible = false; + break; + default: { + if (schema) { + if (schema.type) { + this._type = schema.type; + } + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (keras.Attribute._isEquivalent(schema.default, value)) { + this._visible = false; + } + } + } + break; + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } + + static _convert(value) { + if (Array.isArray(value) || value !== Object(value)) { + return value; + } + const obj = {}; + if (value.class_name) { + obj.__type__ = value.class_name; + } + for (const key of Object.keys(value.config)) { + obj[key] = keras.Attribute._convert(value.config[key]); + } + return obj; + } + + static _isEquivalent(a, b) { + if (a === b) { + return a !== 0 || 1 / a === 1 / b; + } + if (a == null || b == null) { + return false; + } + if (a !== a) { + return b !== b; + } + const type = typeof a; + if (type !== 'function' && type !== 'object' && typeof b != 'object') { + return false; + } + const className = toString.call(a); + if (className !== toString.call(b)) { + return false; + } + switch (className) { + case '[object RegExp]': + case '[object String]': + return '' + a === '' + b; + case '[object Number]': + if (+a !== +a) { + return +b !== +b; + } + return +a === 0 ? 1 / +a === 1 / b : +a === +b; + case '[object Date]': + case '[object Boolean]': + return +a === +b; + case '[object Array]': { + let length = a.length; + if (length !== b.length) { + return false; + } + while (length--) { + if (!keras.Attribute._isEquivalent(a[length], b[length])) { + return false; + } + } + return true; + } + } + + const keys = Object.keys(a); + let size = keys.length; + if (Object.keys(b).length != size) { + return false; + } + while (size--) { + const key = keys[size]; + if (!(Object.prototype.hasOwnProperty.call(b, key) && keras.Attribute._isEquivalent(a[key], b[key]))) { + return false; + } + } + return true; + } +}; + +keras.Tensor = class { + + constructor(name, type, shape, littleEndian, data, reference) { + this._name = name; + this._type = new keras.TensorType(type, new keras.TensorShape(shape)); + this._littleEndian = littleEndian; + this._data = data; + this._reference = reference; + } + + get kind() { + return 'Weights'; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get 
reference() { + return this._reference; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return keras.Tensor._stringify(value, '', ' '); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + if (this._reference) { + context.state = 'Tensor reference not implemented.'; + return context; + } + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + switch (this._type.dataType) { + case 'float16': + context.precision = 16; + break; + case 'float32': + context.precision = 32; + break; + case 'float64': + context.precision = 64; + break; + default: + context.state = 'Tensor data type is not supported.'; + break; + } + context.dimensions = this._type.shape.dimensions; + context.littleEndian = this._littleEndian; + context.rawData = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + return context; + } + + _decode(context, dimension) { + const results = []; + const size = context.dimensions[dimension]; + const littleEndian = context.littleEndian; + if (dimension == context.dimensions.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + if (context.rawData) { + switch (context.precision) { + case 16: + results.push(context.rawData.getFloat16(context.index, littleEndian)); + context.index += 2; + break; + case 32: + results.push(context.rawData.getFloat32(context.index, littleEndian)); + context.index += 4; + break; + case 64: + results.push(context.rawData.getFloat64(context.index, littleEndian)); + context.index += 8; + break; + } + 
context.count++; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + return results; + } + + static _stringify(value, indentation, indent) { + if (Array.isArray(value)) { + const result = []; + result.push(indentation + '['); + const items = value.map((item) => keras.Tensor._stringify(item, indentation + indent, indent)); + if (items.length > 0) { + result.push(items.join(',\n')); + } + result.push(indentation + ']'); + return result.join('\n'); + } + if (typeof value == 'string') { + return indentation + value; + } + if (value == Infinity) { + return indentation + 'Infinity'; + } + if (value == -Infinity) { + return indentation + '-Infinity'; + } + if (isNaN(value)) { + return indentation + 'NaN'; + } + return indentation + value.toString(); + } +}; + +keras.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +keras.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? 
('[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']') : ''; + } +}; + +keras.Metadata = class { + + static open(host) { + if (keras.Metadata._metadata) { + return Promise.resolve(keras.Metadata._metadata); + } + return host.request(null, 'keras-metadata.json', 'utf-8').then((data) => { + keras.Metadata._metadata = new keras.Metadata(data); + return keras.Metadata._metadata; + }).catch(() => { + keras.Metadata._metadata = new keras.Metadata(null); + return keras.Metadata._metadatas; + }); + } + + constructor(data) { + this._map = new Map(); + this._attributeCache = new Map(); + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map.set(item.name, item.schema); + } + } + } + } + } + + type(name) { + return this._map.get(name); + } + + attribute(type, name) { + const key = type + ':' + name; + if (!this._attributeCache.has(key)) { + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + this._attributeCache.set(type + ':' + attribute.name, attribute); + } + } + if (!this._attributeCache.has(key)) { + this._attributeCache.set(key, null); + } + } + return this._attributeCache.get(key); + } +}; + +keras.Group = class { + + constructor(group) { + this._group = group; + } + + attribute(name) { + let value = this._group.attribute(name); + if (!value) { + if (this._group.attribute(name + '0')) { + let index = 0; + value = []; + for (;;) { + let chunk = this._group.attribute(name + index.toString()); + if (!chunk) { + break; + } + value = value.concat(chunk); + index++; + } + } + } + return value; + } + + group(name) { + let value = this._group.group(name); + if (value) { + return new keras.Group(value); + } + return null; + } + + get value() { + return this._group.value; + } +}; + +keras.JsonParser = class { + + static parse(text) { + if 
(text && text.indexOf('NaN') !== -1) { + try { + return JSON.parse(text); + } + catch (err) { + try { + return new keras.JsonParser(text)._read(); + } + catch (err) { + // continue regardless of error + } + } + } + return JSON.parse(text); + } + + constructor(text) { + this._text = text; + this._position = 0; + this._ch = ' '; + this._escape = { '"': '"', '\\': '\\', '/': '/', b: '\b', f: '\f', n: '\n', r: '\r', t: '\t' }; + } + + _read() { + const result = this._value(); + this._whitespace(); + if (this._ch) { + this._error("Syntax error"); + } + return result; + } + + _next() { + return this._ch = this._text.charAt(this._position++); + } + + _expect(text) { + for (let i = 0; i < text.length; i++) { + if (text[i] !== this._ch) { + this._error("Expected '" + text[i] + "' instead of '" + this._ch + "'"); + } + this._ch = this._text.charAt(this._position++); + } + } + + _whitespace() { + while (this._ch && this._ch <= ' ') { + this._next(); + } + } + + _number() { + let value = ''; + if (this._ch === '-') { + value = '-'; + this._expect('-'); + } + if (this._ch === 'I') { + this._expect('Infinity'); + return -Infinity; + } + while (this._ch >= '0' && this._ch <= '9') { + value += this._ch; + this._next(); + } + if (this._ch === '.') { + value += '.'; + while (this._next() && this._ch >= '0' && this._ch <= '9') { + value += this._ch; + } + } + if (this._ch === 'e' || this._ch === 'E') { + value += this._ch; + this._next(); + if (this._ch === '-' || this._ch === '+') { + value += this._ch; + this._next(); + } + while (this._ch >= '0' && this._ch <= '9') { + value += this._ch; + this._next(); + } + } + return +value; + } + + _string() { + let hex; + let i; + let value = ''; + let uffff; + if (this._ch === '"') { + while (this._next()) { + if (this._ch === '"') { + this._next(); + return value; + } + if (this._ch === '\\') { + this._next(); + if (this._ch === 'u') { + uffff = 0; + for (i = 0; i < 4; i ++) { + hex = parseInt(this._next(), 16); + if (!isFinite(hex)) { + 
break; + } + uffff = uffff * 16 + hex; + } + value += String.fromCharCode(uffff); + } + else if (this._escape[this._ch]) { + value += this._escape[this._ch]; + } + else { + break; + } + } + else { + value += this._ch; + } + } + } + this._error("Invalid string"); + } + + _literal() { + switch (this._ch) { + case 't': this._expect('true'); return true; + case 'f': this._expect('false'); return false; + case 'n': this._expect('null'); return null; + case 'N': this._expect('NaN'); return NaN; + case 'I': this._expect('Infinity'); return Infinity; + } + this._error("Unexpected '" + this._ch + "'"); + } + + _array() { + let arr = []; + if (this._ch === '[') { + this._expect('['); + this._whitespace(); + if (this._ch === ']') { + this._expect(']'); + return arr; + } + while (this._ch) { + arr.push(this._value()); + this._whitespace(); + if (this._ch === ']') { + this._expect(']'); + return arr; + } + this._expect(','); + this._whitespace(); + } + } + this._error("Invalid array"); + } + + _object() { + let key; + let obj = {}; + if (this._ch === '{') { + this._expect('{'); + this._whitespace(); + if (this._ch === '}') { + this._expect('}'); + return obj; // empty object + } + while (this._ch) { + key = this._string(); + this._whitespace(); + this._expect(':'); + if (Object.hasOwnProperty.call(obj, key)) { + this._error('Duplicate key "' + key + '"'); + } + obj[key] = this._value(); + this._whitespace(); + if (this._ch === '}') { + this._expect('}'); + return obj; + } + this._expect(','); + this._whitespace(); + } + } + this._error("Invalid object"); + } + + _value() { + this._whitespace(); + switch (this._ch) { + case '{': return this._object(); + case '[': return this._array(); + case '"': return this._string(); + case '-': return this._number(); + default: return this._ch >= '0' && this._ch <= '9' ? 
this._number() : this._literal(); + } + } + + _error(message) { + throw new Error(message + ' at ' + this._position + '.'); + } +}; + +keras.Weights = class { + + constructor() { + this._map = new Map(); + } + + add(layer_name, tensor) { + if (!this._map.has(layer_name)) { + this._map.set(layer_name, []); + } + this._map.get(layer_name).push(tensor); + } + + get(group, name) { + if (group) { + const list = this._map.get(group.split('/').shift()); + if (list) { + const match1 = list.filter((tensor) => tensor.name.startsWith(name + '/')); + if (match1.length > 0) { + return match1; + } + const match2 = list.filter((tensor) => tensor.name.startsWith(group + '/' + name + '/')); + if (match2.length > 0) { + return match2; + } + } + } + else { + const match1 = this._map.get(name); + if (match1 && match1.length > 0) { + return match1; + } + const match2 = this._map.get(''); + if (match2 && match2.length > 0) { + const match3 = match2.filter((tensor) => tensor.name.startsWith((group ? group + '/' : '') + name + '/')); + if (match3.length > 0) { + return match3; + } + } + } + return []; + } + + keys() { + return this._map.keys(); + } +}; + +keras.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Keras model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = keras.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/mediapipe.js b/frontend/packages/core/public/netron/mediapipe.js new file mode 100644 index 00000000..fca6e920 --- /dev/null +++ b/frontend/packages/core/public/netron/mediapipe.js @@ -0,0 +1,359 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var mediapipe = mediapipe || {}; +var prototxt = prototxt || require('protobufjs/ext/prototxt'); + +mediapipe.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = 
identifier.split('.').pop().toLowerCase(); + if (extension === 'pbtxt') { + const tags = context.tags('pbtxt'); + const text = context.text; + if (tags.has('node') && (text.indexOf('input_stream:') !== -1 || text.indexOf('input_side_packet:') !== -1 || text.indexOf('output_stream:') !== -1)) { + return true; + } + } + return false; + } + + open(context, host) { + const identifier = context.identifier; + try { + const reader = prototxt.TextReader.create(context.text); + const root = new mediapipe.Object(reader); + return Promise.resolve(new mediapipe.Model(root)); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + return Promise.reject(new mediapipe.Error(message.replace(/\.$/, '') + " in '" + identifier + "'.")); + } + } +}; + +mediapipe.Model = class { + + constructor(root) { + this._graphs = [ new mediapipe.Graph(root) ]; + } + + get format() { + return 'MediaPipe'; + } + + get graphs() { + return this._graphs; + } +}; + +mediapipe.Graph = class { + + constructor(root) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + + if (root) { + if (root.input_stream) { + const inputs = Array.isArray(root.input_stream) ? root.input_stream : [ root.input_stream ]; + for (const input of inputs) { + let parts = input.split(':'); + const type = (parts.length > 1) ? parts.shift() : ''; + const name = parts.shift(); + this._inputs.push(new mediapipe.Parameter(name, [ + new mediapipe.Argument(name, type, null) + ])); + } + } + if (root.output_stream) { + const outputs = Array.isArray(root.output_stream) ? root.output_stream : [ root.output_stream ]; + for (const output of outputs) { + let parts = output.split(':'); + const type = (parts.length > 1) ? 
parts.shift() : ''; + const name = parts.shift(); + this._outputs.push(new mediapipe.Parameter(name, [ + new mediapipe.Argument(name, type, null) + ])); + } + } + if (root.input_side_packet) { + const inputs = Array.isArray(root.input_side_packet) ? root.input_side_packet : [ root.input_side_packet ]; + for (const input of inputs) { + let parts = input.split(':'); + const type = (parts.length > 1) ? parts.shift() : ''; + const name = parts.shift(); + this._inputs.push(new mediapipe.Parameter(name, [ + new mediapipe.Argument(name, type, null) + ])); + } + } + if (root.output_side_packet) { + const outputs = Array.isArray(root.output_side_packet) ? root.output_side_packet : [ root.output_side_packet ]; + for (const output of outputs) { + let parts = output.split(':'); + const type = (parts.length > 1) ? parts.shift() : ''; + const name = parts.shift(); + this._outputs.push(new mediapipe.Parameter(output, [ + new mediapipe.Argument(name, type, null) + ])); + } + } + if (root.node) { + const nodes = Array.isArray(root.node) ? root.node : [ root.node ]; + for (const node of nodes) { + this._nodes.push(new mediapipe.Node(node)); + } + } + } + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +mediapipe.Node = class { + + constructor(node) { + this._type = node.calculator || '?'; + this._type = this._type.replace(/Calculator$/, ''); + this._inputs = []; + this._outputs = []; + this._attributes = []; + + if (node.input_stream) { + let args = []; + const inputs = Array.isArray(node.input_stream) ? node.input_stream : [ node.input_stream ]; + for (const input of inputs) { + let parts = input.split(':'); + const type = (parts.length > 1) ? 
parts.shift() : ''; + const name = parts.shift(); + args.push(new mediapipe.Argument(name, type, null)); + } + this._inputs.push(new mediapipe.Parameter('input_stream', args)); + } + if (node.output_stream) { + let args = []; + const outputs = Array.isArray(node.output_stream) ? node.output_stream : [ node.output_stream ]; + for (const output of outputs) { + let parts = output.split(':'); + const type = (parts.length > 1) ? parts.shift() : ''; + const name = parts.shift(); + args.push(new mediapipe.Argument(name, type, null)); + } + this._outputs.push(new mediapipe.Parameter('output_stream', args)); + } + if (node.input_side_packet) { + let args = []; + const inputs = Array.isArray(node.input_side_packet) ? node.input_side_packet : [ node.input_side_packet ]; + for (const input of inputs) { + let parts = input.split(':'); + const type = (parts.length > 1) ? parts.shift() : ''; + const name = parts.shift(); + args.push(new mediapipe.Argument(name, type, null)); + } + this._inputs.push(new mediapipe.Parameter('input_side_packet', args)); + } + if (node.output_side_packet) { + let args = []; + const outputs = Array.isArray(node.output_side_packet) ? node.output_side_packet : [ node.output_side_packet ]; + for (const output of outputs) { + let parts = output.split(':'); + const type = (parts.length > 1) ? 
parts.shift() : ''; + const name = parts.shift(); + args.push(new mediapipe.Argument(name, type, null)); + } + this._outputs.push(new mediapipe.Parameter('output_side_packet', args)); + } + let options = node.options || node.node_options || null; + if (options) { + for (const key of Object.keys(options)) { + if (key === '__type__') { + continue; + } + const value = options[key]; + this._attributes.push(new mediapipe.Attribute(key, value)); + } + } + } + + get name() { + return ''; + } + + get type() { + return this._type; + } + + get metadata() { + return null; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +mediapipe.Attribute = class { + + constructor(name, value) { + this._name = name; + this._value = value; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return true; + } +}; + +mediapipe.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +mediapipe.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new mediapipe.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._type) { + return this._type; + } + if (this._initializer) { + return this._initializer.type; + } + return null; + } + + get initializer() { + return this._initializer; + } +}; + + +mediapipe.Object = class { + + constructor(reader) { + reader.start(); + + let close = false; + const type = reader.peek(); + if (type.startsWith('[') && type.endsWith(']')) { + this.__type__ = reader.read().substring(0, 
type.length - 1); + reader.match(':'); + reader.start(); + close = true; + } + let arrayTags = new Set(); + while (!reader.end()) { + var tag = reader.tag(); + var next = reader.peek(); + var obj = null; + if (next === '{') { + obj = new mediapipe.Object(reader); + } + else if (next.startsWith('"') && next.endsWith('"')) { + obj = reader.read().substring(1, next.length - 1); + } + else if (next === 'true' || next === 'false') { + obj = reader.read(); + } + else if (reader.first()) { + obj = []; + while (!reader.last()) { + const data = reader.read(); + if (!isNaN(data)) { + obj.push(parseFloat(data)); + } + } + } + else if (!isNaN(next)) { + obj = parseFloat(reader.read()); + } + else { + obj = reader.read(); + } + if (this[tag] && (!Array.isArray(this[tag]) || arrayTags.has(tag))) { + this[tag] = [ this[tag] ]; + arrayTags.delete(tag); + } + if (this[tag]) { + this[tag].push(obj); + } + else { + if (Array.isArray(obj)) { + arrayTags.add(tag); + } + this[tag] = obj; + } + } + if (close) { + reader.expect('}'); + } + } +}; + +mediapipe.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading MediaPipe model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = mediapipe.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/mlnet-metadata.json b/frontend/packages/core/public/netron/mlnet-metadata.json new file mode 100644 index 00000000..121ec202 --- /dev/null +++ b/frontend/packages/core/public/netron/mlnet-metadata.json @@ -0,0 +1,92 @@ +[ + { + "name": "ImageLoaderTransform", + "schema": { + "description": "Load images from files.", + "attributes": [ + { "name": "ImageFolder", "type": "string", "description": "Folder where to search for images" } + ] + } + }, + { + "name": "ImageScalerTransform", + "schema": { + "description": "Scales an image to specified dimensions using one of the three scale types: isotropic 
with padding, isotropic with cropping or anisotropic. In case of isotropic padding, transparent color is used to pad resulting image.", + "attributes": [ + { "name": "Width" }, + { "name": "Height" }, + { "name": "Resizing", "type": "ImageResizingTransformer.ResizingKind" }, + { "name": "Anchor", "type": "ImageResizingTransformer.Anchor" } + ] + } + }, + { + "name": "ImagePixelExtractor", + "schema": { + "description": "Scales an image to specified dimensions using one of the three scale types: isotropic with padding, isotropic with cropping or anisotropic. In case of isotropic padding, transparent color is used to pad resulting image.", + "attributes": [ + { "name": "ColorsToExtract", "type": "ImagePixelExtractingTransformer.ColorBits" }, + { "name": "OrderOfExtraction", "type": "ImagePixelExtractingTransformer.ColorsOrder" }, + { "name": "Planes", "type": "uint8" }, + { "name": "OutputAsFloatArray", "type": "boolean" }, + { "name": "OffsetImage", "type": "float32" }, + { "name": "ScaleImage", "type": "float32" }, + { "name": "InterleavePixelColors", "type": "boolean" } + ] + } + }, + { + "name": "TensorFlowTransform", + "schema": { + "description": "Transforms the data using the TensorFlow model.", + "attributes": [ + { "name": "IsFrozen", "type": "boolean" }, + { "name": "AddBatchDimensionInput", "type": "boolean" } + ] + } + }, + { + "name": "TextNormalizerTransform", + "schema": { + "description": "A text normalization transform that allows normalizing text case, removing diacritical marks, punctuation marks and/or numbers. 
The transform operates on text input as well as vector of tokens/text (vector of ReadOnlyMemory).", + "attributes": [ + { "name": "CaseMode", "type": "TextNormalizingTransformer.CaseMode" }, + { "name": "KeepDiacritics", "type": "boolean" }, + { "name": "KeepPunctuations", "type": "boolean" }, + { "name": "KeepNumbers", "type": "boolean" } + ] + } + }, + { + "name": "CharToken", + "schema": { + "description": "Character-oriented tokenizer where text is considered a sequence of characters.", + "attributes": [ + { "name": "UseMarkerChars", "type": "boolean" }, + { "name": "IsSeparatorStartEnd", "type": "boolean" } + ] + } + }, + { + "name": "ConcatTransform", + "schema": { + "category": "Tensor", + "description": "Concatenates one or more columns of the same item type." + } + }, + { + "name": "CopyTransform", + "schema": { + "category": "Tensor", + "description": "Duplicates columns from the dataset." + } + }, + { + "name": "SSAModel", + "schema": { + "attributes": [ + { "name": "UseMarkerChars", "type": "boolean" } + ] + } + } +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/mlnet.js b/frontend/packages/core/public/netron/mlnet.js new file mode 100644 index 00000000..5cc1f4da --- /dev/null +++ b/frontend/packages/core/public/netron/mlnet.js @@ -0,0 +1,2581 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var mlnet = mlnet || {}; +var zip = zip || require('./zip'); + +mlnet.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'zip') { + const entries = context.entries('zip'); + if (entries.length > 0) { + const root = new Set([ 'TransformerChain', 'Predictor']); + if (entries.some((e) => root.has(e.name.split('\\').shift().split('/').shift()))) { + return true; + } + } + } + return false; + } + + open(context, host) { + const identifier = context.identifier; + 
return mlnet.Metadata.open(host).then((metadata) => { + try { + const reader = new mlnet.ModelReader(context.entries('zip')); + return new mlnet.Model(metadata, reader); + } + catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new mlnet.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } +}; + +mlnet.Model = class { + + constructor(metadata, reader) { + this._format = "ML.NET"; + if (reader.version && reader.version.length > 0) { + this._format += ' v' + reader.version; + } + this._graphs = []; + this._graphs.push(new mlnet.Graph(metadata, reader)); + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +mlnet.Graph = class { + + constructor(metadata, reader) { + + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._groups = false; + + if (reader.schema && reader.schema.inputs) { + for (const input of reader.schema.inputs) { + this._inputs.push(new mlnet.Parameter(input.name, [ + new mlnet.Argument(input.name, new mlnet.TensorType(input.type)) + ])); + } + } + + let scope = new Map(); + if (reader.dataLoaderModel) { + this._loadTransformer(metadata, scope, '', reader.dataLoaderModel); + } + if (reader.predictor) { + this._loadTransformer(metadata, scope, '', reader.predictor); + } + if (reader.transformerChain) { + this._loadTransformer(metadata, scope, '', reader.transformerChain); + } + } + + _loadTransformer(metadata, scope, group, transformer) { + switch (transformer.__type__) { + case 'TransformerChain': + case 'Text': + this._loadChain(metadata, scope, transformer.__name__, transformer.chain); + break; + default: + this._createNode(metadata, scope, group, transformer); + break; + } + } + + _loadChain(metadata, scope, name, chain) { + this._groups = true; + const group = name.split('/').splice(1).join('/'); + for (const childTransformer of chain) { + this._loadTransformer(metadata, scope, group, childTransformer); + } + } 
+ + _createNode(metadata, scope, group, transformer) { + + if (transformer.inputs && transformer.outputs) { + for (const input of transformer.inputs) { + input.name = scope[input.name] ? scope[input.name].argument : input.name; + } + for (const output of transformer.outputs) { + if (scope[output.name]) { + scope[output.name].counter++; + const next = output.name + '\n' + scope[output.name].counter.toString(); // custom argument id + scope[output.name].argument = next; + output.name = next; + } + else { + scope[output.name] = { + argument: output.name, + counter: 0 + }; + } + } + } + + this._nodes.push(new mlnet.Node(metadata, group, transformer)); + } + + get groups() { + return this._groups; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +mlnet.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +mlnet.Argument = class { + + constructor(name, type) { + if (typeof name !== 'string') { + throw new mlnet.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } +}; + +mlnet.Node = class { + + constructor(metadata, group, transformer) { + this._metadata = metadata; + this._group = group; + this._name = transformer.__name__; + this._type = transformer.__type__; + this._inputs = []; + this._outputs = []; + this._attributes = []; + + if (transformer.inputs) { + let i = 0; + for (const input of transformer.inputs) { + this._inputs.push(new mlnet.Parameter(i.toString(), [ + new mlnet.Argument(input.name) + ])); + i++; + } + } + + if (transformer.outputs) { + let i = 0; + for (const output of transformer.outputs) { + this._outputs.push(new 
mlnet.Parameter(i.toString(), [ + new mlnet.Argument(output.name) + ])); + i++; + } + } + + for (const key of Object.keys(transformer).filter((key) => !key.startsWith('_') && key !== 'inputs' && key !== 'outputs')) { + const schema = metadata.attribute(this._type, this._name); + this._attributes.push(new mlnet.Attribute(schema, key, transformer[key])); + } + } + + get group() { + return this._group; + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +mlnet.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + if (schema) { + if (schema.type) { + this._type = schema.type; + } + if (this._type) { + let type = mlnet; + let id = this._type.split('.'); + while (type && id.length > 0) { + type = type[id.shift()]; + } + if (type) { + mlnet.Attribute._reverseMap = mlnet.Attribute._reverseMap || {}; + let reverse = mlnet.Attribute._reverseMap[this._type]; + if (!reverse) { + reverse = {}; + for (const key of Object.keys(type)) { + reverse[type[key.toString()]] = key; + } + mlnet.Attribute._reverseMap[this._type] = reverse; + } + if (Object.prototype.hasOwnProperty.call(reverse, this._value)) { + this._value = reverse[this._value]; + } + } + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return true; + } +}; + +mlnet.TensorType = class { + + constructor(codec) { + + mlnet.TensorType._map = mlnet.TensorType._map || new Map([ + [ 'Byte', 'uint8' ], + [ 'Boolean', 'boolean' ], + [ 'Single', 'float32' ], + [ 'Double', 'float64' ], + [ 'UInt32', 'uint32' ], + [ 'TextSpan', 'string' ] + ]); + + this._dataType = '?'; + this._shape = new mlnet.TensorShape(null); + + if 
(mlnet.TensorType._map.has(codec.name)) { + this._dataType = mlnet.TensorType._map.get(codec.name); + } + else if (codec.name == 'VBuffer') { + if (mlnet.TensorType._map.has(codec.itemType.name)) { + this._dataType = mlnet.TensorType._map.get(codec.itemType.name); + } + else { + throw new mlnet.Error("Unknown data type '" + codec.itemType.name + "'."); + } + this._shape = new mlnet.TensorShape(codec.dims); + } + else if (codec.name == 'Key2') { + this._dataType = 'key2'; + } + else { + throw new mlnet.Error("Unknown data type '" + codec.name + "'."); + } + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +mlnet.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.join(',') + ']'; + } +}; + +mlnet.Metadata = class { + + static open(host) { + if (mlnet.Metadata._metadata) { + return Promise.resolve(mlnet.Metadata._metadata); + } + return host.request(null, 'mlnet-metadata.json', 'utf-8').then((data) => { + mlnet.Metadata._metadata = new mlnet.Metadata(data); + return mlnet.Metadata._metadata; + }).catch(() => { + mlnet.Metadata._metadata = new mlnet.Metadata(null); + return mlnet.Metadata._metadatas; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 
0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +mlnet.ModelReader = class { + + constructor(entries) { + + let catalog = new mlnet.ComponentCatalog(); + catalog.register('AffineNormExec', mlnet.AffineNormSerializationUtils); + catalog.register('AnomalyPredXfer', mlnet.AnomalyPredictionTransformer); + catalog.register('BinaryPredXfer', mlnet.BinaryPredictionTransformer); + catalog.register('BinaryLoader', mlnet.BinaryLoader); + catalog.register('CaliPredExec', mlnet.CalibratedPredictor); + catalog.register('CdfNormalizeFunction', mlnet.CdfColumnFunction); + catalog.register('CharToken', mlnet.TokenizingByCharactersTransformer); + catalog.register('ChooseColumnsTransform', mlnet.ColumnSelectingTransformer); + catalog.register('ClusteringPredXfer', mlnet.ClusteringPredictionTransformer); + catalog.register('ConcatTransform', mlnet.ColumnConcatenatingTransformer); + catalog.register('CopyTransform', mlnet.ColumnCopyingTransformer); + catalog.register('ConvertTransform', mlnet.TypeConvertingTransformer); + catalog.register('CSharpTransform', mlnet.CSharpTransform); + catalog.register('DropColumnsTransform', mlnet.DropColumnsTransform); + catalog.register('FAFMPredXfer', mlnet.FieldAwareFactorizationMachinePredictionTransformer); + catalog.register('FastForestBinaryExec', mlnet.FastForestClassificationPredictor); + catalog.register('FastTreeBinaryExec', mlnet.FastTreeBinaryModelParameters); + catalog.register('FastTreeTweedieExec', mlnet.FastTreeTweedieModelParameters); + catalog.register('FastTreeRankerExec', mlnet.FastTreeRankingModelParameters); + catalog.register('FastTreeRegressionExec', mlnet.FastTreeRegressionModelParameters); + catalog.register('FeatWCaliPredExec', mlnet.FeatureWeightsCalibratedModelParameters); + catalog.register('FieldAwareFactMacPredict', mlnet.FieldAwareFactorizationMachineModelParameters); + 
catalog.register('GcnTransform', mlnet.LpNormNormalizingTransformer); + catalog.register('GenericScoreTransform', mlnet.GenericScoreTransform); + catalog.register('IidChangePointDetector', mlnet.IidChangePointDetector); + catalog.register('IidSpikeDetector', mlnet.IidSpikeDetector); + catalog.register('ImageClassificationTrans', mlnet.ImageClassificationTransformer); + catalog.register('ImageClassificationPred', mlnet.ImageClassificationModelParameters); + catalog.register('ImageLoaderTransform', mlnet.ImageLoadingTransformer); + catalog.register('ImageScalerTransform', mlnet.ImageResizingTransformer); + catalog.register('ImagePixelExtractor', mlnet.ImagePixelExtractingTransformer); + catalog.register('KeyToValueTransform', mlnet.KeyToValueMappingTransformer); + catalog.register('KeyToVectorTransform', mlnet.KeyToVectorMappingTransformer); + catalog.register('KMeansPredictor', mlnet.KMeansModelParameters); + catalog.register('LinearRegressionExec', mlnet.LinearRegressionModelParameters); + catalog.register('LightGBMRegressionExec', mlnet.LightGbmRegressionModelParameters); + catalog.register('LightGBMBinaryExec', mlnet.LightGbmBinaryModelParameters); + catalog.register('Linear2CExec', mlnet.LinearBinaryModelParameters); + catalog.register('LinearModelStats', mlnet.LinearModelParameterStatistics); + catalog.register('MaFactPredXf', mlnet.MatrixFactorizationPredictionTransformer); + catalog.register('MFPredictor', mlnet.MatrixFactorizationModelParameters); + catalog.register('MulticlassLinear', mlnet.LinearMulticlassModelParameters); + catalog.register('MultiClassLRExec', mlnet.MaximumEntropyModelParameters); + catalog.register('MultiClassNaiveBayesPred', mlnet.NaiveBayesMulticlassModelParameters); + catalog.register('MultiClassNetPredictor', mlnet.MultiClassNetPredictor); + catalog.register('MulticlassPredXfer', mlnet.MulticlassPredictionTransformer); + catalog.register('NgramTransform', mlnet.NgramExtractingTransformer); + catalog.register('NgramHashTransform', 
mlnet.NgramHashingTransformer); + catalog.register('NltTokenizeTransform', mlnet.NltTokenizeTransform); + catalog.register('Normalizer', mlnet.NormalizingTransformer); + catalog.register('NormalizeTransform', mlnet.NormalizeTransform); + catalog.register('OnnxTransform', mlnet.OnnxTransformer); + catalog.register('OptColTransform', mlnet.OptionalColumnTransform); + catalog.register('OVAExec', mlnet.OneVersusAllModelParameters); + catalog.register('pcaAnomExec', mlnet.PcaModelParameters); + catalog.register('PcaTransform', mlnet.PrincipalComponentAnalysisTransformer); + catalog.register('PipeDataLoader', mlnet.CompositeDataLoader); + catalog.register('PlattCaliExec', mlnet.PlattCalibrator); + catalog.register('PMixCaliPredExec', mlnet.ParameterMixingCalibratedModelParameters); + catalog.register('PoissonRegressionExec', mlnet.PoissonRegressionModelParameters); + catalog.register('ProtonNNMCPred', mlnet.ProtonNNMCPred); + catalog.register('RegressionPredXfer', mlnet.RegressionPredictionTransformer); + catalog.register('RowToRowMapper', mlnet.RowToRowMapperTransform); + catalog.register('SsaForecasting', mlnet.SsaForecastingTransformer); + catalog.register('SSAModel', mlnet.AdaptiveSingularSpectrumSequenceModelerInternal); + catalog.register('SelectColumnsTransform', mlnet.ColumnSelectingTransformer); + catalog.register('StopWordsTransform', mlnet.StopWordsTransform); + catalog.register('TensorFlowTransform', mlnet.TensorFlowTransformer); + catalog.register('TermLookupTransform', mlnet.ValueMappingTransformer); + catalog.register('TermTransform', mlnet.ValueToKeyMappingTransformer); + catalog.register('TermManager', mlnet.TermManager); + catalog.register('Text', mlnet.TextFeaturizingEstimator); + catalog.register('TextLoader', mlnet.TextLoader); + catalog.register('TextNormalizerTransform', mlnet.TextNormalizingTransformer); + catalog.register('TokenizeTextTransform', mlnet.WordTokenizingTransformer); + catalog.register('TransformerChain', mlnet.TransformerChain); + 
catalog.register('ValueMappingTransformer', mlnet.ValueMappingTransformer); + catalog.register('XGBoostMulticlass', mlnet.XGBoostMulticlass); + + const root = new mlnet.ModelHeader(catalog, entries, '', null); + + const version = root.openText('TrainingInfo/Version.txt'); + if (version) { + this.version = version.split(' ').shift().split('\r').shift(); + } + + const schemaReader = root.openBinary('Schema'); + if (schemaReader) { + this.schema = new mlnet.BinaryLoader(null, schemaReader).schema; + } + + const transformerChain = root.open('TransformerChain'); + if (transformerChain) { + this.transformerChain = transformerChain; + } + + const dataLoaderModel = root.open('DataLoaderModel'); + if (dataLoaderModel) { + this.dataLoaderModel = dataLoaderModel; + } + + const predictor = root.open('Predictor'); + if (predictor) { + this.predictor = predictor; + } + } +}; + +mlnet.ComponentCatalog = class { + + constructor() { + this._map = new Map(); + } + + register(signature, type) { + this._map.set(signature, type); + } + + create(signature, context) { + if (!this._map.has(signature)) { + throw new mlnet.Error("Unknown loader signature '" + signature + "'."); + } + const type = this._map.get(signature); + return Reflect.construct(type, [ context ]); + } +}; + +mlnet.ModelHeader = class { + + constructor(catalog, entries, directory, data) { + + this._entries = entries; + this._catalog = catalog; + this._directory = directory; + + if (data) { + const reader = new mlnet.Reader(data); + + const textDecoder = new TextDecoder('ascii'); + reader.assert('ML\0MODEL'); + this.versionWritten = reader.uint32(); + this.versionReadable = reader.uint32(); + + const modelBlockOffset = reader.uint64(); + /* let modelBlockSize = */ reader.uint64(); + const stringTableOffset = reader.uint64(); + const stringTableSize = reader.uint64(); + const stringCharsOffset = reader.uint64(); + /* v stringCharsSize = */ reader.uint64(); + this.modelSignature = textDecoder.decode(reader.bytes(8)); + 
this.modelVersionWritten = reader.uint32(); + this.modelVersionReadable = reader.uint32(); + this.loaderSignature = textDecoder.decode(reader.bytes(24).filter((c) => c != 0)); + this.loaderSignatureAlt = textDecoder.decode(reader.bytes(24).filter((c) => c != 0)); + const tailOffset = reader.uint64(); + /* let tailLimit = */ reader.uint64(); + const assemblyNameOffset = reader.uint64(); + const assemblyNameSize = reader.uint32(); + if (stringTableOffset != 0 && stringCharsOffset != 0) { + reader.position = stringTableOffset; + const stringCount = stringTableSize >> 3; + const stringSizes = []; + let previousStringSize = 0; + for (let i = 0; i < stringCount; i++) { + const stringSize = reader.uint64(); + stringSizes.push(stringSize - previousStringSize); + previousStringSize = stringSize; + } + reader.position = stringCharsOffset; + this.strings = []; + for (let i = 0; i < stringCount; i++) { + const cch = stringSizes[i] >> 1; + let sb = ''; + for (let ich = 0; ich < cch; ich++) { + sb += String.fromCharCode(reader.uint16()); + } + this.strings.push(sb); + } + } + if (assemblyNameOffset != 0) { + reader.position = assemblyNameOffset; + this.assemblyName = textDecoder.decode(reader.bytes(assemblyNameSize)); + } + reader.position = tailOffset; + reader.assert('LEDOM\0LM'); + + this._reader = reader; + this._reader.position = modelBlockOffset; + } + } + + get reader() { + return this._reader; + } + + string(empty) { + const id = this.reader.int32(); + if (empty === null && id < 0) { + return null; + } + return this.strings[id]; + } + + open(name) { + const dir = this._directory.length > 0 ? 
this._directory + '/' : this._directory; + name = dir + name; + const entryName = name + '/Model.key'; + const entry = this._entries.find((entry) => entry.name == entryName || entry.name == entryName.replace(/\//g, '\\')); + if (entry) { + const context = new mlnet.ModelHeader(this._catalog, this._entries, name, entry.data); + let value = this._catalog.create(context.loaderSignature, context); + value.__type__ = value.__type__ || context.loaderSignature; + value.__name__ = name; + return value; + } + return null; + } + + openBinary(name) { + const dir = this._directory.length > 0 ? this._directory + '/' : this._directory; + name = dir + name; + const entry = this._entries.find((entry) => entry.name == name || entry.name == name.replace(/\//g, '\\')); + return entry ? new mlnet.Reader(entry.data) : null; + } + + openText(name) { + const dir = this._directory.length > 0 ? this._directory + '/' : this._directory; + name = dir + name; + const entry = this._entries.find((entry) => entry.name.split('\\').join('/') == name); + if (entry) { + return new TextDecoder().decode(entry.data); + } + return null; + } + + check(signature, verWrittenCur, verWeCanReadBack) { + return signature === this.modelSignature && verWrittenCur >= this.modelVersionReadable && verWeCanReadBack <= this.modelVersionWritten; + } +}; + +mlnet.Reader = class { + + constructor(buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + } + + set position(value) { + this._position = value; + } + + get position() { + return this._position; + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new mlnet.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. 
Unexpected end of file.'); + } + } + + match(text) { + const position = this._position; + for (let i = 0; i < text.length; i++) { + if (this.byte() != text.charCodeAt(i)) { + this._position = position; + return false; + } + } + return true; + } + + assert(text) { + if (!this.match(text)) { + throw new mlnet.Error("Invalid '" + text.split('\0').join('') + "' signature."); + } + } + + boolean() { + return this.byte() != 0 ? true : false; + } + + booleans(count) { + let values = []; + for (let i = 0; i < count; i++) { + values.push(this.boolean()); + } + return values; + } + + byte() { + const position = this._position; + this.skip(1); + return this._dataView.getUint8(position); + } + + bytes(length) { + const position = this._position; + this.skip(length); + return this._buffer.subarray(position, this._position); + } + + int16() { + const position = this._position; + this.skip(2); + return this._dataView.getInt16(position, true); + } + + uint16() { + const position = this._position; + this.skip(2); + return this._dataView.getUint16(position, true); + } + + int32() { + const position = this._position; + this.skip(4); + return this._dataView.getInt32(position, true); + } + + int32s(count) { + let values = []; + for (let i = 0; i < count; i++) { + values.push(this.int32()); + } + return values; + } + + uint32() { + const position = this._position; + this.skip(4); + return this._dataView.getUint32(position, true); + } + + uint32s(count) { + let values = []; + for (let i = 0; i < count; i++) { + values.push(this.uint32()); + } + return values; + } + + int64() { + const low = this.uint32(); + const hi = this.uint32(); + if (low == 0xffffffff && hi == 0x7fffffff) { + return Number.MAX_SAFE_INTEGER; + } + if (hi == -1) { + return -low; + } + if (hi != 0) { + throw new mlnet.Error('Value not in 48-bit range.'); + } + return (hi << 32) | low; + } + + uint64() { + const low = this.uint32(); + const hi = this.uint32(); + if (hi == 0) { + return low; + } + if (hi > 1048576) { + 
throw new mlnet.Error('Value not in 48-bit range.'); + } + return (hi * 4294967296) + low; + } + + float32() { + const position = this._position; + this.skip(4); + return this._dataView.getFloat32(position, true); + } + + float32s(count) { + let values = []; + for (let i = 0; i < count; i++) { + values.push(this.float32()); + } + return values; + } + + float64() { + const position = this._position; + this.skip(8); + return this._dataView.getFloat64(position, true); + } + + float64s(count) { + let values = []; + for (let i = 0; i < count; i++) { + values.push(this.float64()); + } + return values; + } + + string() { + const size = this.leb128(); + const buffer = this.bytes(size); + return new TextDecoder('utf-8').decode(buffer); + } + + leb128() { + let result = 0; + let shift = 0; + let value; + do { + value = this.byte(); + result |= (value & 0x7F) << shift; + shift += 7; + } while ((value & 0x80) != 0); + return result; + } +}; + +mlnet.BinaryLoader = class { // 'BINLOADR' + + constructor(context, reader) { + if (context) { + if (context.modelVersionWritten >= 0x00010002) { + this.Threads = context.reader.int32(); + this.GeneratedRowIndexName = context.string(null); + } + this.ShuffleBlocks = context.modelVersionWritten >= 0x00010003 ? 
context.reader.float64() : 4; + reader = context.openBinary('Schema.idv'); + } + // https://github.com/dotnet/machinelearning/blob/master/docs/code/IdvFileFormat.md + reader.assert('CML\0DVB\0'); + reader.bytes(8); // version + reader.bytes(8); // compatibleVersion + const tableOfContentsOffset = reader.uint64(); + const tailOffset = reader.int64(); + reader.int64(); // rowCount + const columnCount = reader.int32(); + reader.position = tailOffset; + reader.assert('\0BVD\0LMC'); + reader.position = tableOfContentsOffset; + this.schema = {}; + this.schema.inputs = []; + for (let c = 0; c < columnCount; c ++) { + let input = {}; + input.name = reader.string(); + input.type = new mlnet.Codec(reader); + input.compression = reader.byte(); // None = 0, Deflate = 1 + input.rowsPerBlock = reader.leb128(); + input.lookupOffset = reader.int64(); + input.metadataTocOffset = reader.int64(); + this.schema.inputs.push(input); + } + } +}; + +mlnet.TransformerChain = class { + + constructor(context) { + const reader = context.reader; + const length = reader.int32(); + this.scopes = []; + this.chain = []; + for (let i = 0; i < length; i++) { + this.scopes.push(reader.int32()); // 0x01 = Training, 0x02 = Testing, 0x04 = Scoring + const dirName = 'Transform_' + ('00' + i).slice(-3); + const transformer = context.open(dirName); + this.chain.push(transformer); + } + } +}; + +mlnet.TransformBase = class { + + constructor(/* context */) { + + } +}; + +mlnet.RowToRowTransformBase = class extends mlnet.TransformBase { + + constructor(context) { + super(context); + } +}; + +mlnet.RowToRowTransformerBase = class { + + constructor(/* context */) { + } +}; + +mlnet.RowToRowMapperTransformBase = class extends mlnet.RowToRowTransformBase { + + constructor(context) { + super(context); + } +}; + +mlnet.OneToOneTransformerBase = class { + + constructor(context) { + const reader = context.reader; + const n = reader.int32(); + this.inputs = []; + this.outputs = []; + for (let i = 0; i < n; i++) { + 
const output = context.string(); + const input = context.string(); + this.outputs.push({ name: output }); + this.inputs.push({ name: input }); + } + } +}; + +mlnet.ColumnCopyingTransformer = class { + + constructor(context) { + const reader = context.reader; + const length = reader.uint32(); + this.inputs = []; + this.outputs = []; + for (let i = 0; i < length; i++) { + this.outputs.push({ name: context.string() }); + this.inputs.push({ name: context.string() }); + } + } +}; + +mlnet.ColumnConcatenatingTransformer = class { + + constructor(context) { + const reader = context.reader; + if (context.modelVersionReadable >= 0x00010003) { + const count = reader.int32(); + for (let i = 0; i < count; i++) { + this.outputs = []; + this.outputs.push({ name: context.string() }); + const n = reader.int32(); + this.inputs = []; + for (let j = 0; j < n; j++) { + let input = { + name: context.string() + }; + const alias = context.string(null); + if (alias) { + input.alias = alias; + } + this.inputs.push(input); + } + } + } + else { + this.precision = reader.int32(); + const n = reader.int32(); + let names = []; + let inputs = []; + for (let i = 0; i < n; i++) { + names.push(context.string()); + const numSources = reader.int32(); + let input = []; + for (let j = 0; j < numSources; j++) { + input.push(context.string()); + } + inputs.push(input); + } + let aliases = []; + if (context.modelVersionReadable >= 0x00010002) { + for (let i = 0; i < n; i++) { + /* let length = */ inputs[i].length; + let alias = {}; + aliases.push(alias); + if (context.modelVersionReadable >= 0x00010002) { + for (;;) { + const j = reader.int32(); + if (j == -1) { + break; + } + alias[j] = context.string(); + } + } + } + } + + if (n > 1) { + throw new mlnet.Error(''); + } + + this.outputs = []; + for (let i = 0; i < n; i++) { + this.outputs.push({ + name: names[i] + }); + this.inputs = inputs[i]; + } + } + } +}; + +mlnet.PredictionTransformerBase = class { + + constructor(context) { + this.Model = 
context.open('Model'); + const trainSchemaReader = context.openBinary('TrainSchema'); + if (trainSchemaReader) { + new mlnet.BinaryLoader(null, trainSchemaReader).schema; + } + } +}; + +mlnet.MatrixFactorizationModelParameters = class { + + constructor(context) { + const reader = context.reader; + this.NumberOfRows = reader.int32(); + if (context.modelVersionWritten < 0x00010002) { + reader.uint64(); // mMin + } + this.NumberOfColumns = reader.int32(); + if (context.modelVersionWritten < 0x00010002) { + reader.uint64(); // nMin + } + this.ApproximationRank = reader.int32(); + + this._leftFactorMatrix = reader.float32s(this.NumberOfRows * this.ApproximationRank); + this._rightFactorMatrix = reader.float32s(this.NumberOfColumns * this.ApproximationRank); + } +}; + +mlnet.MatrixFactorizationPredictionTransformer = class extends mlnet.PredictionTransformerBase { + + constructor(context) { + super(context); + this.MatrixColumnIndexColumnName = context.string(); + this.MatrixRowIndexColumnName = context.string(); + // TODO + } +}; + +mlnet.FieldAwareFactorizationMachinePredictionTransformer = class extends mlnet.PredictionTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.inputs = []; + for (let i = 0; i < this.FieldCount; i++) { + this.inputs.push({ name: context.string() }); + } + this.Threshold = reader.float32(); + this.ThresholdColumn = context.string(); + this.inputs.push({ name: this.ThresholdColumn }); + } +}; + +mlnet.SingleFeaturePredictionTransformerBase = class extends mlnet.PredictionTransformerBase { + + constructor(context) { + super(context); + const featureColumn = context.string(null); + this.inputs = []; + this.inputs.push({ name: featureColumn }); + this.outputs = []; + this.outputs.push({ name: featureColumn }); + } +}; + +mlnet.ClusteringPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + } +}; + 
+mlnet.AnomalyPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Threshold = reader.float32(); + this.ThresholdColumn = context.string(); + } +}; + +mlnet.AffineNormSerializationUtils = class { + + constructor(context) { + const reader = context.reader; + /* cbFloat = */ reader.int32(); + this.NumFeatures = reader.int32(); + const morphCount = reader.int32(); + if (morphCount == -1) { + this.ScalesSparse = reader.float32s(reader.int32()); + this.OffsetsSparse = reader.float32s(reader.int32()); + } + else { + // debugger; + } + } +}; + +mlnet.RegressionPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + } +}; + +mlnet.BinaryPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Threshold = reader.float32(); + this.ThresholdColumn = context.string(); + } +}; + +mlnet.MulticlassPredictionTransformer = class extends mlnet.SingleFeaturePredictionTransformerBase { + + constructor(context) { + super(context); + this.TrainLabelColumn = context.string(null); + this.inputs.push({ name: this.TrainLabelColumn }); + } +}; + +mlnet.PredictorBase = class { + + constructor(context) { + const reader = context.reader; + if (reader.int32() != 4) { + throw new mlnet.Error('Invalid float type size.'); + } + } +}; + +mlnet.ModelParametersBase = class { + + constructor(context) { + const reader = context.reader; + const cbFloat = reader.int32(); + if (cbFloat !== 4) { + throw new mlnet.Error('This file was saved by an incompatible version.'); + } + } +}; + +mlnet.ImageClassificationModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.classCount = reader.int32(); + 
this.imagePreprocessorTensorInput = reader.string(); + this.imagePreprocessorTensorOutput = reader.string(); + this.graphInputTensor = reader.string(); + this.graphOutputTensor = reader.string(); + this.modelFile = 'TFModel'; + // const modelBytes = context.openBinary('TFModel'); + // first uint32 is size of TensorFlow model + // inputType = new VectorDataViewType(uint8); + // outputType = new VectorDataViewType(float32, classCount); + } +}; + +mlnet.NaiveBayesMulticlassModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this._labelHistogram = reader.int32s(reader.int32()); + this._featureCount = reader.int32(); + this._featureHistogram = []; + for (let i = 0; i < this._labelHistogram.length; i++) { + if (this._labelHistogram[i] > 0) { + this._featureHistogram.push(reader.int32s(this._featureCount)); + } + } + this._absentFeaturesLogProb = reader.float64s(this._labelHistogram.length); + } +}; + +mlnet.LinearModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Bias = reader.float32(); + /* let len = */ reader.int32(); + this.Indices = reader.int32s(reader.int32()); + this.Weights = reader.float32s(reader.int32()); + } +}; + +mlnet.LinearBinaryModelParameters = class extends mlnet.LinearModelParameters { + + constructor(context) { + super(context); + if (context.modelVersionWritten > 0x00020001) { + this.Statistics = context.open('ModelStats'); + } + } +}; + +mlnet.ModelStatisticsBase = class { + + constructor(context) { + const reader = context.reader; + this.ParametersCount = reader.int32(); + this.TrainingExampleCount = reader.int64(); + this.Deviance = reader.float32(); + this.NullDeviance = reader.float32(); + + } +}; + +mlnet.LinearModelParameterStatistics = class extends mlnet.ModelStatisticsBase { + + constructor(context) { + super(context); + const reader = context.reader; + if 
(context.modelVersionWritten < 0x00010002) { + if (!reader.boolean()) { + return; + } + } + const stdErrorValues = reader.float32s(this.ParametersCount); + const length = reader.int32(); + if (length == this.ParametersCount) { + this._coeffStdError = stdErrorValues; + } + else { + this.stdErrorIndices = reader.int32s(this.ParametersCount); + this._coeffStdError = stdErrorValues; + } + this._bias = reader.float32(); + const isWeightsDense = reader.byte(); + const weightsLength = reader.int32(); + const weightsValues = reader.float32s(weightsLength); + + if (isWeightsDense) { + this._weights = weightsValues; + } + else { + this.weightsIndices = reader.int32s(weightsLength); + } + } +}; + +mlnet.LinearMulticlassModelParametersBase = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + const numberOfFeatures = reader.int32(); + const numberOfClasses = reader.int32(); + this.Biases = reader.float32s(numberOfClasses); + const numStarts = reader.int32(); + if (numStarts == 0) { + /* let numIndices = */ reader.int32(); + /* let numWeights = */ reader.int32(); + this.Weights = []; + for (let i = 0; i < numberOfClasses; i++) { + const w = reader.float32s(numberOfFeatures); + this.Weights.push(w); + } + } + else { + + const starts = reader.int32s(reader.int32()); + /* let numIndices = */ reader.int32(); + let indices = []; + for (let i = 0; i < numberOfClasses; i++) { + indices.push(reader.int32s(starts[i + 1] - starts[i])); + } + /* let numValues = */ reader.int32(); + this.Weights = []; + for (let i = 0; i < numberOfClasses; i++) { + const values = reader.float32s(starts[i + 1] - starts[i]); + this.Weights.push(values); + } + } + + const labelNamesReader = context.openBinary('LabelNames'); + if (labelNamesReader) { + this.LabelNames = []; + for (let i = 0; i < numberOfClasses; i++) { + const id = labelNamesReader.int32(); + this.LabelNames.push(context.strings[id]); + } + } + + const statistics = 
context.open('ModelStats'); + if (statistics) { + this.Statistics = statistics; + } + } +}; + +mlnet.LinearMulticlassModelParameters = class extends mlnet.LinearMulticlassModelParametersBase { + + constructor(context) { + super(context); + } +}; + +mlnet.RegressionModelParameters = class extends mlnet.LinearModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.PoissonRegressionModelParameters = class extends mlnet.RegressionModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.LinearRegressionModelParameters = class extends mlnet.RegressionModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.MaximumEntropyModelParameters = class extends mlnet.LinearMulticlassModelParametersBase { + + constructor(context) { + super(context); + } +}; + +mlnet.TokenizingByCharactersTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.UseMarkerChars = reader.boolean(); + this.IsSeparatorStartEnd = context.modelVersionReadable < 0x00010002 ? 
true : reader.boolean(); + } +}; + +mlnet.SequencePool = class { + + constructor(reader) { + this.idLim = reader.int32(); + this.start = reader.int32s(this.idLim + 1); + this.bytes = reader.bytes(this.start[this.idLim]); + } +}; + +mlnet.NgramExtractingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this._option(context, reader, this); + } + else { + // debugger; + } + } + + _option(context, reader, option) { + const readWeighting = context.modelVersionReadable >= 0x00010002; + option.NgramLength = reader.int32(); + option.SkipLength = reader.int32(); + if (readWeighting) { + option.Weighting = reader.int32(); + } + option.NonEmptyLevels = reader.booleans(option.NgramLength); + option.NgramMap = new mlnet.SequencePool(reader); + if (readWeighting) { + option.InvDocFreqs = reader.float64s(reader.int32()); + } + } +}; + +// mlnet.NgramExtractingTransformer.WeightingCriteria + +mlnet.NgramHashingTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const loadLegacy = context.modelVersionWritten < 0x00010003; + const reader = context.reader; + if (loadLegacy) { + reader.int32(); // cbFloat + } + this.inputs = []; + this.outputs = []; + const columnsLength = reader.int32(); + if (loadLegacy) { + /* TODO + for (let i = 0; i < columnsLength; i++) { + this.Columns.push(new NgramHashingEstimator.ColumnOptions(context)); + } */ + } + else { + for (let i = 0; i < columnsLength; i++) { + this.outputs.push(context.string()); + let csrc = reader.int32(); + for (let j = 0; j < csrc; j++) { + let src = context.string(); + this.inputs.push(src); + // TODO inputs[i][j] = src; + } + } + } + } +}; + +mlnet.WordTokenizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this.Separators = 
[]; + const count = reader.int32(); + for (let i = 0; i < count; i++) { + this.Separators.push(String.fromCharCode(reader.int16())); + } + } + else { + // debugger; + } + } +}; + +mlnet.TextNormalizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.CaseMode = reader.byte(); + this.KeepDiacritics = reader.boolean(); + this.KeepPunctuations = reader.boolean(); + this.KeepNumbers = reader.boolean(); + } +}; + +mlnet.TextNormalizingTransformer.CaseMode = { + Lower: 0, + Upper: 1, + None: 2 +}; + +mlnet.PrincipalComponentAnalysisTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (context.modelVersionReadable === 0x00010001) { + if (reader.int32() !== 4) { + throw new mlnet.Error('This file was saved by an incompatible version.'); + } + } + this.TransformInfos = []; + for (let i = 0; i < this.inputs.length; i++) { + let option = {}; + option.Dimension = reader.int32(); + option.Rank = reader.int32(); + option.Eigenvectors = []; + for (let j = 0; j < option.Rank; j++) { + option.Eigenvectors.push(reader.float32s(option.Dimension)); + } + option.MeanProjected = reader.float32s(reader.int32()); + this.TransformInfos.push(option); + } + } +}; + +mlnet.LpNormNormalizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + + if (context.modelVersionWritten <= 0x00010002) { + /* cbFloat */ reader.int32(); + } + // let normKindSerialized = context.modelVersionWritten >= 0x00010002; + if (this.inputs.length == 1) { + this.EnsureZeroMean = reader.boolean(); + this.Norm = reader.byte(); + this.Scale = reader.float32(); + } + else { + // debugger; + } + } +}; + +mlnet.KeyToVectorMappingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = 
context.reader; + if (context.modelVersionWritten == 0x00010001) { + /* cbFloat = */ reader.int32(); + } + const columnsLength = this.inputs.length; + this.Bags = reader.booleans(columnsLength); + } +}; + +mlnet.TypeConvertingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + // debugger; + } +}; + +mlnet.ImageLoadingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + this.ImageFolder = context.string(null); + } +}; + +mlnet.ImageResizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this._option(reader, this); + } + else { + this.Options = []; + for (let i = 0; i < this.inputs.length; i++) { + let option = {}; + this._option(reader, option); + this.Options.push(option); + } + } + } + + _option(reader, option) { + option.Width = reader.int32(); + option.Height = reader.int32(); + option.Resizing = reader.byte(); + option.Anchor = reader.byte(); + } +}; + +mlnet.ImageResizingTransformer.ResizingKind = { + IsoPad: 0, + IsoCrop: 1, + Fill: 2 +}; + +mlnet.ImageResizingTransformer.Anchor = { + Right: 0, + Left: 1, + Top: 2, + Bottom: 3, + Center: 4 +}; + +mlnet.ImagePixelExtractingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (this.inputs.length == 1) { + this._option(context, reader, this); + } + else { + this.Options = []; + for (let i = 0; i < this.inputs.length; i++) { + let option = {}; + this._option(context, reader, option); + this.Options.push(option); + } + } + } + + _option(context, reader, option) { + option.ColorsToExtract = reader.byte(); + option.OrderOfExtraction = context.modelVersionWritten <= 0x00010002 ? 
mlnet.ImagePixelExtractingTransformer.ColorsOrder.ARGB : reader.byte(); + let planes = option.ColorsToExtract; + planes = (planes & 0x05) + ((planes >> 1) & 0x05); + planes = (planes & 0x03) + ((planes >> 2) & 0x03); + option.Planes = planes & 0xFF; + option.OutputAsFloatArray = reader.boolean(); + option.OffsetImage = reader.float32(); + option.ScaleImage = reader.float32(); + option.InterleavePixelColors = reader.boolean(); + } +}; + +mlnet.ImagePixelExtractingTransformer.ColorBits = { + Alpha: 0x01, + Red: 0x02, + Green: 0x04, + Blue: 0x08, + Rgb: 0x0E, + All: 0x0F +}; + +mlnet.ImagePixelExtractingTransformer.ColorsOrder = { + ARGB: 1, + ARBG: 2, + ABRG: 3, + ABGR: 4, + AGRB: 5, + AGBR: 6 +}; + +mlnet.NormalizingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Options = []; + for (let i = 0; i < this.inputs.length; i++) { + let isVector = false; + let shape = 0; + let itemKind = ''; + if (context.modelVersionWritten < 0x00010002) { + isVector = reader.boolean(); + shape = [ reader.int32() ]; + itemKind = reader.byte(); + } + else { + isVector = reader.boolean(); + itemKind = reader.byte(); + shape = reader.int32s(reader.int32()); + } + let itemType = ''; + switch (itemKind) { + case 9: itemType = 'float32'; break; + case 10: itemType = 'float64'; break; + default: throw new mlnet.Error("Unknown NormalizingTransformer item kind '" + itemKind + "'."); + } + const type = itemType + (!isVector ? 
'' : '[' + shape.map((dim) => dim.toString()).join(',') + ']'); + const name = 'Normalizer_' + ('00' + i).slice(-3); + const func = context.open(name); + this.Options.push({ type: type, func: func }); + } + } +}; + +mlnet.KeyToValueMappingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + } +}; + +mlnet.ValueToKeyMappingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + if (context.modelVersionWritten >= 0x00010003) { + this.textMetadata = reader.booleans(this.outputs.length + this.inputs.length); + } + else { + this.textMetadata = []; + for (let i = 0; i < this.columnPairs.length; i++) { + this.textMetadata.push(false); + } + } + const vocabulary = context.open('Vocabulary'); + if (vocabulary) { + this.termMap = vocabulary.termMap; + } + } +}; + +mlnet.TermMap = class { + + constructor(context) { + const reader = context.reader; + const mtype = reader.byte(); + switch (mtype) { + case 0: { // Text + this.values = []; + const cstr = reader.int32(); + for (let i = 0; i < cstr; i++) { + this.values.push(context.string()); + } + break; + } + case 1: { // Codec + const codec = new mlnet.Codec(reader); + const count = reader.int32(); + this.values = codec.read(reader, count); + break; + } + default: + throw new mlnet.Error("Unknown term map type '" + mtype.toString() + "'."); + } + } +}; + +mlnet.TermManager = class { + + constructor(context) { + const reader = context.reader; + const cmap = reader.int32(); + this.termMap = []; + if (context.modelVersionWritten >= 0x00010002) { + for (let i = 0; i < cmap; ++i) { + this.termMap.push(new mlnet.TermMap(context)); + // debugger; + // termMap[i] = TermMap.Load(c, host, CodecFactory); + } + } + else { + throw new mlnet.Error('Unsupported TermManager version.'); + // for (let i = 0; i < cmap; ++i) { + // debugger; + // // termMap[i] = TermMap.TextImpl.Create(c, host) + // } + } + } 
+}; + + +mlnet.ValueMappingTransformer = class extends mlnet.OneToOneTransformerBase { + + constructor(context) { + super(context); + this.keyColumnName = 'Key'; + if (context.check('TXTLOOKT', 0x00010002, 0x00010002)) { + this.keyColumnName = 'Term'; + } + // TODO + } +}; + +mlnet.KeyToVectorTransform = class { + + constructor(/* context */) { + } +}; + +mlnet.GenericScoreTransform = class { + + constructor(/* context */) { + } +}; + +mlnet.CompositeDataLoader = class { + + constructor(context) { + /* let loader = */ context.open('Loader'); + const reader = context.reader; + // LoadTransforms + reader.int32(); // floatSize + const cxf = reader.int32(); + const tagData = []; + for (let i = 0; i < cxf; i++) { + let tag = ''; + let args = null; + if (context.modelVersionReadable >= 0x00010002) { + tag = context.string(); + args = context.string(null); + } + tagData.push([ tag, args ]); + } + this.chain = []; + for (let j = 0; j < cxf; j++) { + const name = 'Transform_' + ('00' + j).slice(-3); + const transform = context.open(name); + this.chain.push(transform); + } + } +}; + +mlnet.RowToRowMapperTransform = class extends mlnet.RowToRowTransformBase { + + constructor(context) { + super(context); + const mapper = context.open('Mapper'); + this.__type__ = mapper.__type__; + for (const key of Object.keys(mapper)) { + this[key] = mapper[key]; + } + } +}; + +mlnet.ImageClassificationTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.addBatchDimensionInput = reader.boolean(); + const numInputs = reader.int32(); + this.inputs = []; + for (let i = 0; i < numInputs; i++) { + this.inputs.push({ name: context.string() }); + } + this.outputs = []; + const numOutputs = reader.int32(); + for (let i = 0; i < numOutputs; i++) { + this.outputs.push({ name: context.string() }); + } + this.labelColumn = reader.string(); + this.checkpointName = reader.string(); + this.arch = reader.int32(); // 
Architecture + this.scoreColumnName = reader.string(); + this.predictedColumnName = reader.string(); + this.learningRate = reader.float32(); + this.classCount = reader.int32(); + this.keyValueAnnotations = []; + for (let i = 0; i < this.classCount; i++) { + this.keyValueAnnotations.push(context.string()); + } + this.predictionTensorName = reader.string(); + this.softMaxTensorName = reader.string(); + this.jpegDataTensorName = reader.string(); + this.resizeTensorName = reader.string(); + } +}; + +mlnet.OnnxTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.modelFile = 'OnnxModel'; + // const modelBytes = context.openBinary('OnnxModel'); + // first uint32 is size of .onnx model + const numInputs = context.modelVersionWritten > 0x00010001 ? reader.int32() : 1; + this.inputs = []; + for (let i = 0; i < numInputs; i++) { + this.inputs.push({ name: context.string() }); + } + const numOutputs = context.modelVersionWritten > 0x00010001 ? reader.int32() : 1; + this.outputs = []; + for (let i = 0; i < numOutputs; i++) { + this.outputs.push({ name: context.string() }); + } + if (context.modelVersionWritten > 0x0001000C) { + const customShapeInfosLength = reader.int32(); + this.LoadedCustomShapeInfos = []; + for (let i = 0; i < customShapeInfosLength; i++) { + this.LoadedCustomShapeInfos.push({ + name: context.string(), + shape: reader.int32s(reader.int32()) + }); + } + } + } +}; + +mlnet.OptionalColumnTransform = class extends mlnet.RowToRowMapperTransformBase { + + constructor(context) { + super(context); + } +}; + +mlnet.TensorFlowTransformer = class extends mlnet.RowToRowTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.IsFrozen = context.modelVersionReadable >= 0x00010002 ? reader.boolean() : true; + this.AddBatchDimensionInput = context.modelVersionReadable >= 0x00010003 ? 
reader.boolean() : true; + const numInputs = reader.int32(); + this.inputs = []; + for (let i = 0; i < numInputs; i++) { + this.inputs.push({ name: context.string() }); + } + const numOutputs = context.modelVersionReadable >= 0x00010002 ? reader.int32() : 1; + this.outputs = []; + for (let i = 0; i < numOutputs; i++) { + this.outputs.push({ name: context.string() }); + } + } +}; + +mlnet.OneVersusAllModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.UseDist = reader.boolean(); + const len = reader.int32(); + this.chain = []; + for (let i = 0; i < len; i++) { + const name = 'SubPredictor_' + ('00' + i).slice(-3); + const predictor = context.open(name); + this.chain.push(predictor); + } + } +}; + +mlnet.TextFeaturizingEstimator = class { + + constructor(context) { + + if (context.modelVersionReadable === 0x00010001) { + const reader = context.reader; + const n = reader.int32(); + this.chain = []; + /* let loader = */ context.open('Loader'); + for (let i = 0; i < n; i++) { + const name = 'Step_' + ('00' + i).slice(-3); + const transformer = context.open(name); + this.chain.push(transformer); + // debugger; + } + + // throw new mlnet.Error('Unsupported TextFeaturizingEstimator format.'); + } + else { + let chain = context.open('Chain'); + this.chain = chain.chain; + } + } +}; + +mlnet.TextLoader = class { + + constructor(context) { + const reader = context.reader; + reader.int32(); // floatSize + this.MaxRows = reader.int64(); + this.Flags = reader.uint32(); + this.InputSize = reader.int32(); + const separatorCount = reader.int32(); + this.Separators = []; + for (let i = 0; i < separatorCount; i++) { + this.Separators.push(String.fromCharCode(reader.uint16())); + } + this.Bindinds = new mlnet.TextLoader.Bindinds(context); + } +}; + +mlnet.TextLoader.Bindinds = class { + + constructor(context) { + let reader = context.reader; + let cinfo = reader.int32(); + for (let i = 0; i < 
cinfo; i++) { + // debugger; + } + } +}; + +mlnet.CalibratedPredictorBase = class { + + constructor(predictor, calibrator) { + this.SubPredictor = predictor; + this.Calibrator = calibrator; + } +}; + +mlnet.ValueMapperCalibratedPredictorBase = class extends mlnet.CalibratedPredictorBase { + + constructor(predictor, calibrator) { + super(predictor, calibrator); + } +}; + +mlnet.CalibratedModelParametersBase = class { + + constructor(context) { + this.Predictor = context.open('Predictor'); + this.Calibrator = context.open('Calibrator'); + } +}; + +mlnet.ValueMapperCalibratedModelParametersBase = class extends mlnet.CalibratedModelParametersBase { + + constructor(context) { + super(context); + // debugger; + } +}; + +mlnet.CalibratedPredictor = class extends mlnet.ValueMapperCalibratedPredictorBase { + + constructor(context) { + let predictor = context.open('Predictor'); + let calibrator = context.open('Calibrator'); + super(predictor, calibrator); + } +}; + +mlnet.ParameterMixingCalibratedModelParameters = class extends mlnet.ValueMapperCalibratedModelParametersBase { + + constructor(context) { + super(context); + } +}; + +mlnet.FieldAwareFactorizationMachineModelParameters = class { + + constructor(context) { + let reader = context.reader; + this.Norm = reader.boolean(); + this.FieldCount = reader.int32(); + this.FeatureCount = reader.int32(); + this.LatentDim = reader.int32(); + this.LinearWeights = reader.float32s(reader.int32()); + this.LatentWeights = reader.float32s(reader.int32()); + } +}; + +mlnet.KMeansModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.k = reader.int32(); + this.Dimensionality = reader.int32(); + this.Centroids = []; + for (let i = 0; i < this.k; i++) { + const count = context.modelVersionWritten >= 0x00010002 ? reader.int32() : this.Dimensionality; + const indices = count < this.Dimensionality ? 
reader.int32s(count) : null; + const values = reader.float32s(count); + this.Centroids.push({ indices: indices, values: values }); + } + // input type = float32[dimensionality] + // output type = float32[k] + } +}; + +mlnet.PcaModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Dimension = reader.int32(); + this.Rank = reader.int32(); + const center = reader.boolean(); + if (center) { + this.Mean = reader.float32s(this.Dimension); + } + else { + this.Mean = []; + } + this.EigenVectors = []; + for (let i = 0; i < this.Rank; ++i) { + this.EigenVectors.push(reader.float32s(this.Dimension)); + } + // input type -> float32[Dimension] + } +}; + +mlnet.TreeEnsembleModelParameters = class extends mlnet.ModelParametersBase { + + constructor(context) { + super(context); + const reader = context.reader; + const usingDefaultValues = context.modelVersionWritten >= this.VerDefaultValueSerialized; + const categoricalSplits = context.modelVersionWritten >= this.VerCategoricalSplitSerialized; + this.TrainedEnsemble = new mlnet.InternalTreeEnsemble(context, usingDefaultValues, categoricalSplits); + this.InnerOptions = context.string(null); + if (context.modelVersionWritten >= this.verNumFeaturesSerialized) { + this.NumFeatures = reader.int32(); + } + + // input type -> float32[NumFeatures] + // output type -> float32 + } +}; + +mlnet.InternalTreeEnsemble = class { + + constructor(context, usingDefaultValues, categoricalSplits) { + const reader = context.reader; + this.Trees = []; + const numTrees = reader.int32(); + for (let i = 0; i < numTrees; i++) { + switch (reader.byte()) { + case mlnet.InternalTreeEnsemble.TreeType.Regression: + this.Trees.push(new mlnet.InternalRegressionTree(context, usingDefaultValues, categoricalSplits)); + break; + case mlnet.InternalTreeEnsemble.TreeType.FastForest: + this.Trees.push(new mlnet.InternalQuantileRegressionTree(context, usingDefaultValues, 
categoricalSplits)); + break; + case mlnet.InternalTreeEnsemble.TreeType.Affine: + // Affine regression trees do not actually work, nor is it clear how they ever + // could have worked within TLC, so the chance of this happening seems remote. + throw new mlnet.Error('Affine regression trees unsupported'); + default: + throw new mlnet.Error('Unknown ensemble tree type.'); + } + } + this.Bias = reader.float64(); + this.FirstInputInitializationContent = context.string(null); + } +}; + +mlnet.InternalRegressionTree = class { + + constructor(context, usingDefaultValue, categoricalSplits) { + const reader = context.reader; + this.NumLeaves = reader.int32(); + this.MaxOuptut = reader.float64(); + this.Weight = reader.float64(); + this.LteChild = reader.int32s(reader.int32()); + this.GtChild = reader.int32s(reader.int32()); + this.SplitFeatures = reader.int32s(reader.int32()); + if (categoricalSplits) { + const categoricalNodeIndices = reader.int32s(reader.int32()); + if (categoricalNodeIndices.length > 0) { + this.CategoricalSplitFeatures = []; + this.CategoricalSplitFeatureRanges = []; + for (const index of categoricalNodeIndices) { + this.CategoricalSplitFeatures[index] = reader.int32s(reader.int32()); + this.CategoricalSplitFeatureRanges[index] = reader.int32s(2); + } + } + } + this.Thresholds = reader.uint32s(reader.int32()); + this.RawThresholds = reader.float32s(reader.int32()); + this.DefaultValueForMissing = usingDefaultValue ? 
reader.float32s(reader.int32()) : null; + this.LeafValues = reader.float64s(reader.int32()); + + this.SplitGain = reader.float64s(reader.int32()); + this.GainPValue = reader.float64s(reader.int32()); + this.PreviousLeafValue = reader.float64s(reader.int32()); + } +}; + +mlnet.InternalTreeEnsemble.TreeType = { + Regression: 0, + Affine: 1, + FastForest: 2 +}; + +mlnet.TreeEnsembleModelParametersBasedOnRegressionTree = class extends mlnet.TreeEnsembleModelParameters { + + constructor(context) { + super(context); + } +}; + +mlnet.FastTreeTweedieModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { return 0x00010001; } + get VerDefaultValueSerialized() { return 0x00010002; } + get VerCategoricalSplitSerialized() { return 0x00010003; } +}; + +mlnet.FastTreeRankingModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { return 0x00010002; } + get VerDefaultValueSerialized() { return 0x00010004; } + get VerCategoricalSplitSerialized() { return 0x00010005; } +}; + +mlnet.FastTreeBinaryModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { return 0x00010002; } + get VerDefaultValueSerialized() { return 0x00010004; } + get VerCategoricalSplitSerialized() { return 0x00010005; } +}; + +mlnet.FastTreeRegressionModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { return 0x00010002; } + get VerDefaultValueSerialized() { return 0x00010004; } + get VerCategoricalSplitSerialized() { return 0x00010005; } +}; + +mlnet.LightGbmRegressionModelParameters = class extends 
mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { return 0x00010002; } + get VerDefaultValueSerialized() { return 0x00010004; } + get VerCategoricalSplitSerialized() { return 0x00010005; } +}; + +mlnet.LightGbmBinaryModelParameters = class extends mlnet.TreeEnsembleModelParametersBasedOnRegressionTree { + + constructor(context) { + super(context); + } + + get VerNumFeaturesSerialized() { return 0x00010002; } + get VerDefaultValueSerialized() { return 0x00010004; } + get VerCategoricalSplitSerialized() { return 0x00010005; } +}; + +mlnet.FeatureWeightsCalibratedModelParameters = class extends mlnet.ValueMapperCalibratedModelParametersBase { + + constructor(context) { + super(context); + // debugger; + } +}; + +mlnet.FastTreePredictionWrapper = class { + + constructor(/* context */) { + } +}; + +mlnet.FastForestClassificationPredictor = class extends mlnet.FastTreePredictionWrapper { + constructor(context) { + super(context); + } +}; + +mlnet.PlattCalibrator = class { + + constructor(context) { + const reader = context.reader; + this.ParamA = reader.float64(); + this.ParamB = reader.float64(); + } +}; + +mlnet.Codec = class { + + constructor(reader) { + this.name = reader.string(); + const size = reader.leb128(); + const data = reader.bytes(size); + reader = new mlnet.Reader(data); + + switch (this.name) { + case 'Boolean': break; + case 'Single': break; + case 'Double': break; + case 'Byte': break; + case 'Int32': break; + case 'UInt32': break; + case 'Int64': break; + case 'TextSpan': break; + case 'VBuffer': + this.itemType = new mlnet.Codec(reader); + this.dims = reader.int32s(reader.int32()); + break; + case 'Key': + case 'Key2': + this.itemType = new mlnet.Codec(reader); + this.count = reader.uint64(); + break; + default: + throw new mlnet.Error("Unknown codec '" + this.name + "'."); + } + } + + read(reader, count) { + let values = []; + switch (this.name) { + case 
'Single': + for (let i = 0; i < count; i++) { + values.push(reader.float32()); + } + break; + case 'Int32': + for (let i = 0; i < count; i++) { + values.push(reader.int32()); + } + break; + case 'Int64': + for (let i = 0; i < count; i++) { + values.push(reader.int64()); + } + break; + default: + throw new mlnet.Error("Unknown codec read operation '" + this.name + "'."); + } + return values; + } +}; + +mlnet.SequentialTransformerBase = class { + + constructor(context) { + const reader = context.reader; + this.WindowSize = reader.int32(); + this.InitialWindowSize = reader.int32(); + this.inputs = []; + this.inputs.push({ name: context.string() }); + this.outputs = []; + this.outputs.push({ name: context.string() }); + this.ConfidenceLowerBoundColumn = reader.string(); + this.ConfidenceUpperBoundColumn = reader.string(); + this.Type = new mlnet.Codec(reader); + } +}; + +mlnet.AnomalyDetectionStateBase = class { + + constructor(context) { + const reader = context.reader; + this.LogMartingaleUpdateBuffer = mlnet.AnomalyDetectionStateBase._deserializeFixedSizeQueueDouble(reader); + this.RawScoreBuffer = mlnet.AnomalyDetectionStateBase._deserializeFixedSizeQueueDouble(reader); + this.LogMartingaleValue = reader.float64(); + this.SumSquaredDist = reader.float64(); + this.MartingaleAlertCounter = reader.int32(); + } + + static _deserializeFixedSizeQueueDouble(reader) { + /* let capacity = */ reader.int32(); + const count = reader.int32(); + let queue = []; + for (let i = 0; i < count; i++) { + queue.push(reader.float64()); + } + return queue; + } +}; + +mlnet.SequentialAnomalyDetectionTransformBase = class extends mlnet.SequentialTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.Martingale = reader.byte(); + this.ThresholdScore = reader.byte(); + this.Side = reader.byte(); + this.PowerMartingaleEpsilon = reader.float64(); + this.AlertThreshold = reader.float64(); + this.State = new 
mlnet.AnomalyDetectionStateBase(context); + } +}; + +mlnet.TimeSeriesUtils = class { + + static deserializeFixedSizeQueueSingle(reader) { + /* const capacity = */ reader.int32(); + const count = reader.int32(); + let queue = []; + for (let i = 0; i < count; i++) { + queue.push(reader.float32()); + } + return queue; + } +}; + +mlnet.IidAnomalyDetectionBase = class extends mlnet.SequentialAnomalyDetectionTransformBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.WindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + this.InitialWindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + } +}; + +mlnet.IidAnomalyDetectionBaseWrapper = class { + + constructor(context) { + const internalTransform = new mlnet.IidAnomalyDetectionBase(context); + for (const key of Object.keys(internalTransform)) { + this[key] = internalTransform[key]; + } + } +}; + +mlnet.IidChangePointDetector = class extends mlnet.IidAnomalyDetectionBaseWrapper { + + constructor(context) { + super(context); + } +}; + +mlnet.IidSpikeDetector = class extends mlnet.IidAnomalyDetectionBaseWrapper { + + constructor(context) { + super(context); + } +}; + +mlnet.SequenceModelerBase = class { + + constructor(/* context */) { + } +}; + +mlnet.RankSelectionMethod = { + Fixed: 0, + Exact: 1, + Fact: 2 +}; + +mlnet.AdaptiveSingularSpectrumSequenceModelerInternal = class extends mlnet.SequenceModelerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this._seriesLength = reader.int32(); + this._windowSize = reader.int32(); + this._trainSize = reader.int32(); + this._rank = reader.int32(); + this._discountFactor = reader.float32(); + this._rankSelectionMethod = reader.byte(); // RankSelectionMethod + const isWeightSet = reader.byte(); + this._alpha = reader.float32s(reader.int32()); + if (context.modelVersionReadable >= 0x00010002) { + this._state = reader.float32s(reader.int32()); + } + 
this.ShouldComputeForecastIntervals = reader.byte(); + this._observationNoiseVariance = reader.float32(); + this._autoregressionNoiseVariance = reader.float32(); + this._observationNoiseMean = reader.float32(); + this._autoregressionNoiseMean = reader.float32(); + if (context.modelVersionReadable >= 0x00010002) { + this._nextPrediction = reader.float32(); + } + this._maxRank = reader.int32(); + this._shouldStablize = reader.byte(); + this._shouldMaintainInfo = reader.byte(); + this._maxTrendRatio = reader.float64(); + if (isWeightSet) { + this._wTrans = reader.float32s(reader.int32()); + this._y = reader.float32s(reader.int32()); + } + this._buffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + } +}; + +mlnet.SequentialForecastingTransformBase = class extends mlnet.SequentialTransformerBase { + + constructor(context) { + super(context); + const reader = context.reader; + this._outputLength = reader.int32(); + } +}; + +mlnet.SsaForecastingBaseWrapper = class extends mlnet.SequentialForecastingTransformBase { + + constructor(context) { + super(context); + const reader = context.reader; + this.IsAdaptive = reader.boolean(); + this.Horizon = reader.int32(); + this.ConfidenceLevel = reader.float32(); + this.WindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + this.InitialWindowedBuffer = mlnet.TimeSeriesUtils.deserializeFixedSizeQueueSingle(reader); + this.Model = context.open('SSA'); + } +}; + +mlnet.SsaForecastingTransformer = class extends mlnet.SsaForecastingBaseWrapper { + + constructor(context) { + super(context); + } +}; + +mlnet.ColumnSelectingTransformer = class { + + constructor(context) { + const reader = context.reader; + if (context.check('DRPCOLST', 0x00010002, 0x00010002)) { + throw new mlnet.Error("'LoadDropColumnsTransform' not supported."); + } + else if (context.check('CHSCOLSF', 0x00010001, 0x00010001)) { + reader.int32(); // cbFloat + this.KeepHidden = this._getHiddenOption(reader.byte()); + const 
count = reader.int32(); + this.inputs = []; + for (let colIdx = 0; colIdx < count; colIdx++) { + const dst = context.string(); + this.inputs.push(dst); + context.string(); // src + this._getHiddenOption(reader.byte()); // colKeepHidden + } + } + else { + const keepColumns = reader.boolean(); + this.KeepHidden = reader.boolean(); + this.IgnoreMissing = reader.boolean(); + const length = reader.int32(); + this.inputs = []; + for (let i = 0; i < length; i++) { + this.inputs.push({ name: context.string() }); + } + if (keepColumns) { + this.ColumnsToKeep = this.inputs; + } + else { + this.ColumnsToDrop = this.inputs; + } + } + } + + _getHiddenOption(value) { + switch (value) { + case 1: return true; + case 2: return false; + default: throw new mlnet.Error('Unsupported hide option specified'); + } + } +}; + +mlnet.XGBoostMulticlass = class {}; + +mlnet.NltTokenizeTransform = class {}; + +mlnet.DropColumnsTransform = class {}; + +mlnet.StopWordsTransform = class {}; + +mlnet.CSharpTransform = class {}; + +mlnet.GenericScoreTransform = class {}; + +mlnet.NormalizeTransform = class {}; + +mlnet.CdfColumnFunction = class { + + constructor(/* context, typeSrc */) { + // TODO + } +}; + +mlnet.MultiClassNetPredictor = class {}; + +mlnet.ProtonNNMCPred = class {}; + +mlnet.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'ML.NET Error'; + } +}; + +if (module && module.exports) { + module.exports.ModelFactory = mlnet.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/mnn-metadata.json b/frontend/packages/core/public/netron/mnn-metadata.json new file mode 100644 index 00000000..811c7fc9 --- /dev/null +++ b/frontend/packages/core/public/netron/mnn-metadata.json @@ -0,0 +1,862 @@ +[ + { + "name": "AbsVal", + "schema": { + "operator": 0 + } + }, + { + "name": "QuantizedAdd", + "schema": { + "operator": 1, + "attributes": [ + { "name": "activationType", "type": "FusedActivation" } + ] + } + }, + { + 
"name": "ArgMax", + "schema": { + "operator": 2 + } + }, + { + "name": "AsString", + "schema": { + "operator": 3, + "category": "Transform" + } + }, + { + "name": "BatchNorm", + "schema": { + "operator": 4, + "category": "Normalization" + } + }, + { + "name": "BatchToSpaceND", + "schema": { + "operator": 5, + "category": "Shape" + } + }, + { + "name": "Bias", + "schema": { + "operator": 6, + "category": "Layer" + } + }, + { + "name": "BinaryOp", + "schema": { + "operator": 7, + "attributes": [ + { "name": "T", "type": "DataType" } + ] + } + }, + { + "name": "Bnll", + "schema": { + "operator": 8 + } + }, + { + "name": "Cast", + "schema": { + "operator": 9 + } + }, + { + "name": "Concat", + "schema": { + "operator": 10, + "category": "Tensor" + } + }, + { + "name": "Const", + "schema": { + "operator": 11, + "category": "Constant" + } + }, + { + "name": "Convolution", + "schema": { + "operator": 12, + "category": "Layer", + "attributes": [ + { "name": "padMode", "type": "PadMode" } + ] + } + }, + { + "name": "ConvolutionDepthwise", + "schema": { + "operator": 13, + "category": "Layer", + "attributes": [ + { "name": "padMode", "type": "PadMode" } + ] + } + }, + { + "name": "Crop", + "schema": { + "operator": 14, + "category": "Data" + } + }, + { + "name": "CropAndResize", + "schema": { + "operator": 15, + "category": "Shape" + } + }, + { + "name": "Cubic", + "schema": { + "operator": 16, + "category": "Layer" + } + }, + { + "name": "Deconvolution", + "schema": { + "operator": 17, + "category": "Layer" + } + }, + { + "name": "DeconvolutionDepthwise", + "schema": { + "operator": 18, + "category": "Layer" + } + }, + { + "name": "Dequantize", + "schema": { + "operator": 19 + } + }, + { + "name": "DetectionOutput", + "schema": { + "operator": 20 + } + }, + { + "name": "Dropout", + "schema": { + "operator": 21, + "category": "Dropout" + } + }, + { + "name": "Eltwise", + "schema": { + "operator": 22 + } + }, + { + "name": "ELU", + "schema": { + "operator": 23 + } + }, + { + 
"name": "Embed", + "schema": { + "operator": 24, + "category": "Transform" + } + }, + { + "name": "Exp", + "schema": { + "operator": 25 + } + }, + { + "name": "ExpandDims", + "schema": { + "operator": 26 + } + }, + { + "name": "Fill", + "schema": { + "operator": 27, + "category": "Data" + } + }, + { + "name": "Flatten", + "schema": { + "operator": 28, + "category": "Shape" + } + }, + { + "name": "FloorMod", + "schema": { + "operator": 29, + "category": "Layer" + } + }, + { + "name": "Gather", + "schema": { + "operator": 30, + "category": "Data" + } + }, + { + "name": "GatherV2", + "schema": { + "operator": 31, + "category": "Data" + } + }, + { + "name": "Im2Seq", + "schema": { + "operator": 32, + "category": "Transform" + } + }, + { + "name": "InnerProduct", + "schema": { + "operator": 33, + "category": "Layer" + } + }, + { + "name": "Input", + "schema": { + "operator": 34 + } + }, + { + "name": "Interp", + "schema": { + "operator": 35 + } + }, + { + "name": "Log", + "schema": { + "operator": 36, + "category": "Layer" + } + }, + { + "name": "LRN", + "schema": { + "operator": 37, + "category": "Normalization" + } + }, + { + "name": "LSTM", + "schema": { + "operator": 38, + "category": "Layer" + } + }, + { + "name": "MatMul", + "schema": { + "operator": 39 + } + }, + { + "name": "MVN", + "schema": { + "operator": 40 + } + }, + { + "name": "NonMaxSuppression", + "schema": { + "operator": 41, + "category": "Layer" + } + }, + { + "name": "NonMaxSuppressionV2", + "schema": { + "operator": 42, + "category": "Layer" + } + }, + { + "name": "Normalize", + "schema": { + "operator": 43, + "category": "Normalization" + } + }, + { + "name": "Pack", + "schema": { + "operator": 44 + } + }, + { + "name": "Padding", + "schema": { + "operator": 45, + "category": "Tensor" + } + }, + { + "name": "Permute", + "schema": { + "operator": 46, + "category": "Shape" + } + }, + { + "name": "Pooling", + "schema": { + "operator": 47, + "category": "Pool", + "attributes": [ + { "name": 
"isGlobal", "type": "boolean", "default": false }, + { "name": "type", "type": "PoolType" }, + { "name": "padType", "type": "PoolPadType" }, + { "name": "dataType", "type": "DataType" }, + { "name": "ceilModel", "type": "boolean", "default": true } + ] + } + }, + { + "name": "Power", + "schema": { + "operator": 48 + } + }, + { + "name": "PReLU", + "schema": { + "operator": 49, + "category": "Activation" + } + }, + { + "name": "PriorBox", + "schema": { + "operator": 50 + } + }, + { + "name": "Proposal", + "schema": { + "operator": 51 + } + }, + { + "name": "QuantizedAvgPool", + "schema": { + "operator": 52, + "category": "Pool" + } + }, + { + "name": "QuantizedBiasAdd", + "schema": { + "operator": 53 + } + }, + { + "name": "QuantizedConcat", + "schema": { + "operator": 54, + "category": "Tensor" + } + }, + { + "name": "QuantizedDepthwiseConv2D", + "schema": { + "operator": 55, + "category": "Layer" + } + }, + { + "name": "QuantizedLogistic", + "schema": { + "operator": 56, + "category": "Activation" + } + }, + { + "name": "QuantizedMatMul", + "schema": { + "operator": 57 + } + }, + { + "name": "QuantizedMaxPool", + "schema": { + "operator": 58, + "category": "Pool" + } + }, + { + "name": "QuantizedRelu", + "schema": { + "operator": 59, + "category": "Activation" + } + }, + { + "name": "QuantizedRelu6", + "schema": { + "operator": 60, + "category": "Activation" + } + }, + { + "name": "QuantizedReshape", + "schema": { + "operator": 61, + "category": "Shape" + } + }, + { + "name": "QuantizedSoftmax", + "schema": { + "operator": 62, + "category": "Activation" + } + }, + { + "name": "QuantizeMaxMin", + "schema": { + "operator": 63 + } + }, + { + "name": "QuantizeV2", + "schema": { + "operator": 64 + } + }, + { + "name": "Range", + "schema": { + "operator": 65 + } + }, + { + "name": "Rank", + "schema": { + "operator": 66 + } + }, + { + "name": "ReduceJoin", + "schema": { + "operator": 67 + } + }, + { + "name": "Reduction", + "schema": { + "operator": 68 + } + }, + { + 
"name": "ReLU", + "schema": { + "operator": 69, + "category": "Activation" + } + }, + { + "name": "ReLU6", + "schema": { + "operator": 70, + "category": "Activation" + } + }, + { + "name": "RequantizationRange", + "schema": { + "operator": 71 + } + }, + { + "name": "Requantize", + "schema": { + "operator": 72 + } + }, + { + "name": "Reshape", + "schema": { + "operator": 73, + "category": "Shape" + } + }, + { + "name": "Resize", + "schema": { + "operator": 74, + "category": "Shape" + } + }, + { + "name": "RNN", + "schema": { + "operator": 75, + "category": "Layer" + } + }, + { + "name": "ROIPooling", + "schema": { + "operator": 76, + "category": "Pool" + } + }, + { + "name": "Scale", + "schema": { + "operator": 77, + "category": "Layer" + } + }, + { + "name": "Selu", + "schema": { + "operator": 78, + "category": "Activation" + } + }, + { + "name": "Seq2Out", + "schema": { + "operator": 79, + "category": "Transform" + } + }, + { + "name": "Shape", + "schema": { + "operator": 80, + "category": "Shape" + } + }, + { + "name": "Sigmoid", + "schema": { + "operator": 81, + "category": "Layer" + } + }, + { + "name": "Size", + "schema": { + "operator": 82, + "category": "Activation" + } + }, + { + "name": "Slice", + "schema": { + "operator": 83, + "category": "Tensor" + } + }, + { + "name": "SliceTf", + "schema": { + "operator": 84 + } + }, + { + "name": "Softmax", + "schema": { + "operator": 85, + "category": "Activation" + } + }, + { + "name": "SpaceToBatchND", + "schema": { + "operator": 86, + "category": "Shape" + } + }, + { + "name": "SpatialProduct", + "schema": { + "operator": 87, + "category": "Layer" + } + }, + { + "name": "Split", + "schema": { + "operator": 88 + } + }, + { + "name": "SPP", + "schema": { + "operator": 89, + "category": "Layer" + } + }, + { + "name": "Squeeze", + "schema": { + "operator": 90, + "category": "Transform" + } + }, + { + "name": "StridedSlice", + "schema": { + "operator": 91, + "category": "Tensor", + "attributes": [ + { "name": "Index", 
"type": "DataType" }, + { "name": "T", "type": "DataType" } + ] + } + }, + { + "name": "StringJoin", + "schema": { + "operator": 92, + "category": "Transform" + } + }, + { + "name": "StringSplit", + "schema": { + "operator": 93, + "category": "Transform" + } + }, + { + "name": "StringToNumber", + "schema": { + "operator": 94, + "category": "Transform" + } + }, + { + "name": "TanH", + "schema": { + "operator": 95, + "category": "Activation" + } + }, + { + "name": "TfQuantizedConv2D", + "schema": { + "operator": 96, + "category": "Layer" + } + }, + { + "name": "Threshold", + "schema": { + "operator": 97, + "category": "Activation" + } + }, + { + "name": "Tile", + "schema": { + "operator": 98 + } + }, + { + "name": "TopKV2", + "schema": { + "operator": 99, + "category": "Layer" + } + }, + { + "name": "Transpose", + "schema": { + "operator": 100, + "category": "Transform" + } + }, + { + "name": "UnaryOp", + "schema": { + "operator": 101 + } + }, + { + "name": "Unpack", + "schema": { + "operator": 102 + } + }, + { + "name": "Where", + "schema": { + "operator": 103 + } + }, + { + "name": "Moments", + "schema": { + "operator": 104, + "category": "Layer" + } + }, + { + "name": "RNNSequenceGRU", + "schema": { + "operator": 105, + "category": "Layer" + } + }, + { + "name": "BatchMatMul", + "schema": { + "operator": 106 + } + }, + { + "name": "Unsqueeze", + "schema": { + "operator": 107 + } + }, + { + "name": "MaxLayerCount", + "schema": { + "operator": 128 + } + }, + { + "name": "ConvertTensor", + "schema": { + "operator": 129, + "category": "Tensor" + } + }, + { + "name": "PLUGIN", + "schema": { + "operator": 256, + "category": "Layer" + } + }, + { + "name": "Select", + "schema": { + "operator": 257, + "category": "Layer" + } + }, + { + "name": "ZerosLike", + "schema": { + "operator": 258, + "category": "Layer" + } + }, + { + "name": "Broastcast", + "schema": { + "operator": 259, + "category": "Layer" + } + }, + { + "name": "SetDiff1D", + "schema": { + "operator": 260, + 
"category": "Layer" + } + }, + { + "name": "ReluGrad", + "schema": { + "operator": 261, + "category": "Activation" + } + }, + { + "name": "Relu6Grad", + "schema": { + "operator": 262, + "category": "Activation" + } + }, + { + "name": "PoolGrad", + "schema": { + "operator": 263, + "category": "Pool" + } + }, + { + "name": "SoftmaxGrad", + "schema": { + "operator": 264, + "category": "Activation" + } + }, + { + "name": "Conv2DBackPropFilter", + "schema": { + "operator": 265, + "category": "Layer" + } + }, + { + "name": "ConvInt8", + "schema": { + "operator": 513, + "category": "Layer" + } + }, + { + "name": "Int8ToFloat", + "schema": { + "operator": 514, + "category": "Transform" + } + }, + { + "name": "DepthwiseConvInt8", + "schema": { + "operator": 515, + "category": "Layer" + } + }, + { + "name": "PoolInt8", + "schema": { + "operator": 516, + "category": "Layer" + } + }, + { + "name": "FloatToInt8", + "schema": { + "operator": 517, + "category": "Transform" + } + } +] + diff --git a/frontend/packages/core/public/netron/mnn-schema.js b/frontend/packages/core/public/netron/mnn-schema.js new file mode 100644 index 00000000..10f4e7ef --- /dev/null +++ b/frontend/packages/core/public/netron/mnn-schema.js @@ -0,0 +1,18364 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +/** + * @const + * @namespace + */ +var MNN = MNN || {}; + +/** + * @enum {number} + */ +MNN.NetSource = { + CAFFE: 0, + TENSORFLOW: 1, + TFLITE: 2, + ONNX: 3 +}; + +/** + * @enum {string} + */ +MNN.NetSourceName = { + '0': 'CAFFE', + '1': 'TENSORFLOW', + '2': 'TFLITE', + '3': 'ONNX' +}; + +/** + * @enum {number} + */ +MNN.DataType = { + DT_INVALID: 0, + DT_FLOAT: 1, + DT_DOUBLE: 2, + DT_INT32: 3, + DT_UINT8: 4, + DT_INT16: 5, + DT_INT8: 6, + DT_STRING: 7, + DT_COMPLEX64: 8, + DT_INT64: 9, + DT_BOOL: 10, + DT_QINT8: 11, + DT_QUINT8: 12, + DT_QINT32: 13, + DT_BFLOAT16: 14, + DT_QINT16: 15, + DT_QUINT16: 16, + DT_UINT16: 17, + DT_COMPLEX128: 18, + DT_HALF: 19, + DT_RESOURCE: 20, 
+ DT_VARIANT: 21 +}; + +/** + * @enum {string} + */ +MNN.DataTypeName = { + '0': 'DT_INVALID', + '1': 'DT_FLOAT', + '2': 'DT_DOUBLE', + '3': 'DT_INT32', + '4': 'DT_UINT8', + '5': 'DT_INT16', + '6': 'DT_INT8', + '7': 'DT_STRING', + '8': 'DT_COMPLEX64', + '9': 'DT_INT64', + '10': 'DT_BOOL', + '11': 'DT_QINT8', + '12': 'DT_QUINT8', + '13': 'DT_QINT32', + '14': 'DT_BFLOAT16', + '15': 'DT_QINT16', + '16': 'DT_QUINT16', + '17': 'DT_UINT16', + '18': 'DT_COMPLEX128', + '19': 'DT_HALF', + '20': 'DT_RESOURCE', + '21': 'DT_VARIANT' +}; + +/** + * @enum {number} + */ +MNN.MNN_DATA_FORMAT = { + NCHW: 0, + NHWC: 1, + NC4HW4: 2, + NHWC4: 3, + UNKNOWN: 4 +}; + +/** + * @enum {string} + */ +MNN.MNN_DATA_FORMATName = { + '0': 'NCHW', + '1': 'NHWC', + '2': 'NC4HW4', + '3': 'NHWC4', + '4': 'UNKNOWN' +}; + +/** + * @enum {number} + */ +MNN.PadMode = { + CAFFE: 0, + VALID: 1, + SAME: 2 +}; + +/** + * @enum {string} + */ +MNN.PadModeName = { + '0': 'CAFFE', + '1': 'VALID', + '2': 'SAME' +}; + +/** + * @enum {number} + */ +MNN.QuantizeAlgo = { + DEFAULT: 0, + OVERFLOW_AWARE: 1 +}; + +/** + * @enum {string} + */ +MNN.QuantizeAlgoName = { + '0': 'DEFAULT', + '1': 'OVERFLOW_AWARE' +}; + +/** + * @enum {number} + */ +MNN.PoolType = { + MAXPOOL: 0, + AVEPOOL: 1 +}; + +/** + * @enum {string} + */ +MNN.PoolTypeName = { + '0': 'MAXPOOL', + '1': 'AVEPOOL' +}; + +/** + * @enum {number} + */ +MNN.PoolPadType = { + CAFFE: 0, + VALID: 1, + SAME: 2 +}; + +/** + * @enum {string} + */ +MNN.PoolPadTypeName = { + '0': 'CAFFE', + '1': 'VALID', + '2': 'SAME' +}; + +/** + * @enum {number} + */ +MNN.EltwiseType = { + PROD: 0, + SUM: 1, + MAXIMUM: 2, + SUB: 3 +}; + +/** + * @enum {string} + */ +MNN.EltwiseTypeName = { + '0': 'PROD', + '1': 'SUM', + '2': 'MAXIMUM', + '3': 'SUB' +}; + +/** + * @enum {number} + */ +MNN.BinaryOpOperation = { + ADD: 0, + SUB: 1, + MUL: 2, + DIV: 3, + MAX_TEMP: 4, + MIN_TEMP: 5, + POW: 6, + REALDIV: 7, + MINIMUM: 8, + MAXIMUM: 9, + GREATER: 10, + GREATER_EQUAL: 11, + LESS: 12, + 
FLOORDIV: 13, + SquaredDifference: 14, + EQUAL: 15, + LESS_EQUAL: 16, + FLOORMOD: 17, + MOD: 19, + ATAN2: 20, + LOGICALOR: 21, + NOTEQUAL: 22 +}; + +/** + * @enum {string} + */ +MNN.BinaryOpOperationName = { + '0': 'ADD', + '1': 'SUB', + '2': 'MUL', + '3': 'DIV', + '4': 'MAX_TEMP', + '5': 'MIN_TEMP', + '6': 'POW', + '7': 'REALDIV', + '8': 'MINIMUM', + '9': 'MAXIMUM', + '10': 'GREATER', + '11': 'GREATER_EQUAL', + '12': 'LESS', + '13': 'FLOORDIV', + '14': 'SquaredDifference', + '15': 'EQUAL', + '16': 'LESS_EQUAL', + '17': 'FLOORMOD', + '19': 'MOD', + '20': 'ATAN2', + '21': 'LOGICALOR', + '22': 'NOTEQUAL' +}; + +/** + * @enum {number} + */ +MNN.ReductionType = { + SUM: 0, + ASUM: 1, + SUMSQ: 2, + MEAN: 3, + MAXIMUM: 4, + MINIMUM: 5, + PROD: 6, + ANY: 7, + ALL: 8 +}; + +/** + * @enum {string} + */ +MNN.ReductionTypeName = { + '0': 'SUM', + '1': 'ASUM', + '2': 'SUMSQ', + '3': 'MEAN', + '4': 'MAXIMUM', + '5': 'MINIMUM', + '6': 'PROD', + '7': 'ANY', + '8': 'ALL' +}; + +/** + * @enum {number} + */ +MNN.UnaryOpOperation = { + ABS: 0, + NEG: 1, + FLOOR: 2, + CEIL: 3, + SQUARE: 4, + SQRT: 5, + RSQRT: 6, + EXP: 7, + LOG: 8, + SIN: 9, + COS: 10, + TAN: 11, + ASIN: 12, + ACOS: 13, + ATAN: 14, + RECIPROCAL: 15, + LOG1P: 16, + BNLL: 17, + ACOSH: 18, + SINH: 19, + ASINH: 20, + ATANH: 21, + SIGN: 22, + ROUND: 23, + COSH: 24, + ERF: 25, + ERFC: 26, + ERFINV: 27, + EXPM1: 28 +}; + +/** + * @enum {string} + */ +MNN.UnaryOpOperationName = { + '0': 'ABS', + '1': 'NEG', + '2': 'FLOOR', + '3': 'CEIL', + '4': 'SQUARE', + '5': 'SQRT', + '6': 'RSQRT', + '7': 'EXP', + '8': 'LOG', + '9': 'SIN', + '10': 'COS', + '11': 'TAN', + '12': 'ASIN', + '13': 'ACOS', + '14': 'ATAN', + '15': 'RECIPROCAL', + '16': 'LOG1P', + '17': 'BNLL', + '18': 'ACOSH', + '19': 'SINH', + '20': 'ASINH', + '21': 'ATANH', + '22': 'SIGN', + '23': 'ROUND', + '24': 'COSH', + '25': 'ERF', + '26': 'ERFC', + '27': 'ERFINV', + '28': 'EXPM1' +}; + +/** + * @enum {number} + */ +MNN.CropAndResizeMethod = { + BILINEAR: 0, + NEAREST: 1 
+}; + +/** + * @enum {string} + */ +MNN.CropAndResizeMethodName = { + '0': 'BILINEAR', + '1': 'NEAREST' +}; + +/** + * @enum {number} + */ +MNN.PadValueMode = { + CONSTANT: 0, + REFLECT: 1, + SYMMETRIC: 2 +}; + +/** + * @enum {string} + */ +MNN.PadValueModeName = { + '0': 'CONSTANT', + '1': 'REFLECT', + '2': 'SYMMETRIC' +}; + +/** + * @enum {number} + */ +MNN.FusedActivation = { + kTfLiteActNone: 0, + kTfLiteActRelu: 1, + kTfLiteActRelu1: 2, + kTfLiteActRelu6: 3, + kTfLiteActTanh: 4, + kTfLiteActSignBit: 5, + kTfLiteActSigmoid: 6 +}; + +/** + * @enum {string} + */ +MNN.FusedActivationName = { + '0': 'kTfLiteActNone', + '1': 'kTfLiteActRelu', + '2': 'kTfLiteActRelu1', + '3': 'kTfLiteActRelu6', + '4': 'kTfLiteActTanh', + '5': 'kTfLiteActSignBit', + '6': 'kTfLiteActSigmoid' +}; + +/** + * @enum {number} + */ +MNN.ModeFormat = { + TENSORFLOW: 0, + TFLITE: 1 +}; + +/** + * @enum {string} + */ +MNN.ModeFormatName = { + '0': 'TENSORFLOW', + '1': 'TFLITE' +}; + +/** + * @enum {number} + */ +MNN.QuantizeMode = { + MIN_COMBINED: 0, + MIN_FIRST: 1, + SCALED: 2 +}; + +/** + * @enum {string} + */ +MNN.QuantizeModeName = { + '0': 'MIN_COMBINED', + '1': 'MIN_FIRST', + '2': 'SCALED' +}; + +/** + * @enum {number} + */ +MNN.QuantizeRoundMode = { + HALF_AWAY_FROM_ZERO: 0, + HALF_TO_EVEN: 1 +}; + +/** + * @enum {string} + */ +MNN.QuantizeRoundModeName = { + '0': 'HALF_AWAY_FROM_ZERO', + '1': 'HALF_TO_EVEN' +}; + +/** + * @enum {number} + */ +MNN.STORAGE_TYPE = { + BUFFER: 0, + UNIFORM: 1, + IMAGE: 2 +}; + +/** + * @enum {string} + */ +MNN.STORAGE_TYPEName = { + '0': 'BUFFER', + '1': 'UNIFORM', + '2': 'IMAGE' +}; + +/** + * @enum {number} + */ +MNN.ACCESS_TYPE = { + READ_ONLY: 0, + WRITE_ONLY: 1, + READ_WRITE: 2 +}; + +/** + * @enum {string} + */ +MNN.ACCESS_TYPEName = { + '0': 'READ_ONLY', + '1': 'WRITE_ONLY', + '2': 'READ_WRITE' +}; + +/** + * @enum {number} + */ +MNN.OpType = { + AbsVal: 0, + QuantizedAdd: 1, + ArgMax: 2, + AsString: 3, + InstanceNorm: 4, + BatchToSpaceND: 5, + 
Bias: 6, + BinaryOp: 7, + Bnll: 8, + Cast: 9, + Concat: 10, + Const: 11, + Convolution: 12, + ConvolutionDepthwise: 13, + Crop: 14, + CropAndResize: 15, + Cubic: 16, + Deconvolution: 17, + DeconvolutionDepthwise: 18, + Dequantize: 19, + DetectionOutput: 20, + Dropout: 21, + Eltwise: 22, + ELU: 23, + Embed: 24, + Exp: 25, + ExpandDims: 26, + Fill: 27, + Flatten: 28, + FloorMod: 29, + Gather: 30, + GatherV2: 31, + Im2Seq: 32, + InnerProduct: 33, + Input: 34, + Interp: 35, + Log: 36, + LRN: 37, + LSTM: 38, + MatMul: 39, + MVN: 40, + NonMaxSuppression: 41, + NonMaxSuppressionV2: 42, + Normalize: 43, + Pack: 44, + Padding: 45, + Permute: 46, + Pooling: 47, + Power: 48, + PReLU: 49, + PriorBox: 50, + Proposal: 51, + QuantizedAvgPool: 52, + QuantizedBiasAdd: 53, + QuantizedConcat: 54, + QuantizedDepthwiseConv2D: 55, + QuantizedLogistic: 56, + QuantizedMatMul: 57, + QuantizedMaxPool: 58, + QuantizedRelu: 59, + QuantizedRelu6: 60, + QuantizedReshape: 61, + QuantizedSoftmax: 62, + QuantizeMaxMin: 63, + QuantizeV2: 64, + Range: 65, + Rank: 66, + ReduceJoin: 67, + Reduction: 68, + ReLU: 69, + ReLU6: 70, + RequantizationRange: 71, + Requantize: 72, + Reshape: 73, + Resize: 74, + RNN: 75, + ROIPooling: 76, + Scale: 77, + Selu: 78, + Seq2Out: 79, + Shape: 80, + Sigmoid: 81, + Size: 82, + Slice: 83, + SliceTf: 84, + Softmax: 85, + SpaceToBatchND: 86, + SpatialProduct: 87, + Split: 88, + SPP: 89, + Squeeze: 90, + StridedSlice: 91, + StringJoin: 92, + StringSplit: 93, + StringToNumber: 94, + TanH: 95, + TfQuantizedConv2D: 96, + Threshold: 97, + Tile: 98, + TopKV2: 99, + Transpose: 100, + UnaryOp: 101, + Unpack: 102, + Where: 103, + Moments: 104, + RNNSequenceGRU: 105, + BatchMatMul: 106, + Unsqueeze: 107, + CosineSimilarity: 108, + DepthToSpace: 109, + SpaceToDepth: 110, + ReverseSequence: 111, + Pooling3D: 112, + Convolution3D: 113, + MatrixBandPart: 114, + GatherND: 115, + DetectionPostProcess: 116, + UnravelIndex: 117, + ScatterNd: 118, + OneHot: 119, + BroadcastTo: 120, + 
Dilation2D: 121, + MaxLayerCount: 128, + ConvertTensor: 129, + ArgMin: 130, + LinSpace: 131, + Plugin: 256, + Select: 257, + ZerosLike: 258, + Broastcast: 259, + SetDiff1D: 260, + ReluGrad: 261, + Relu6Grad: 262, + PoolGrad: 263, + SoftmaxGrad: 264, + Conv2DBackPropFilter: 265, + TrainableParam: 266, + BatchNorm: 267, + ZeroGrad: 268, + Extra: 512, + ConvInt8: 513, + Int8ToFloat: 514, + DepthwiseConvInt8: 515, + PoolInt8: 516, + FloatToInt8: 517, + EltwiseInt8: 518 +}; + +/** + * @enum {string} + */ +MNN.OpTypeName = { + '0': 'AbsVal', + '1': 'QuantizedAdd', + '2': 'ArgMax', + '3': 'AsString', + '4': 'InstanceNorm', + '5': 'BatchToSpaceND', + '6': 'Bias', + '7': 'BinaryOp', + '8': 'Bnll', + '9': 'Cast', + '10': 'Concat', + '11': 'Const', + '12': 'Convolution', + '13': 'ConvolutionDepthwise', + '14': 'Crop', + '15': 'CropAndResize', + '16': 'Cubic', + '17': 'Deconvolution', + '18': 'DeconvolutionDepthwise', + '19': 'Dequantize', + '20': 'DetectionOutput', + '21': 'Dropout', + '22': 'Eltwise', + '23': 'ELU', + '24': 'Embed', + '25': 'Exp', + '26': 'ExpandDims', + '27': 'Fill', + '28': 'Flatten', + '29': 'FloorMod', + '30': 'Gather', + '31': 'GatherV2', + '32': 'Im2Seq', + '33': 'InnerProduct', + '34': 'Input', + '35': 'Interp', + '36': 'Log', + '37': 'LRN', + '38': 'LSTM', + '39': 'MatMul', + '40': 'MVN', + '41': 'NonMaxSuppression', + '42': 'NonMaxSuppressionV2', + '43': 'Normalize', + '44': 'Pack', + '45': 'Padding', + '46': 'Permute', + '47': 'Pooling', + '48': 'Power', + '49': 'PReLU', + '50': 'PriorBox', + '51': 'Proposal', + '52': 'QuantizedAvgPool', + '53': 'QuantizedBiasAdd', + '54': 'QuantizedConcat', + '55': 'QuantizedDepthwiseConv2D', + '56': 'QuantizedLogistic', + '57': 'QuantizedMatMul', + '58': 'QuantizedMaxPool', + '59': 'QuantizedRelu', + '60': 'QuantizedRelu6', + '61': 'QuantizedReshape', + '62': 'QuantizedSoftmax', + '63': 'QuantizeMaxMin', + '64': 'QuantizeV2', + '65': 'Range', + '66': 'Rank', + '67': 'ReduceJoin', + '68': 'Reduction', + '69': 
'ReLU', + '70': 'ReLU6', + '71': 'RequantizationRange', + '72': 'Requantize', + '73': 'Reshape', + '74': 'Resize', + '75': 'RNN', + '76': 'ROIPooling', + '77': 'Scale', + '78': 'Selu', + '79': 'Seq2Out', + '80': 'Shape', + '81': 'Sigmoid', + '82': 'Size', + '83': 'Slice', + '84': 'SliceTf', + '85': 'Softmax', + '86': 'SpaceToBatchND', + '87': 'SpatialProduct', + '88': 'Split', + '89': 'SPP', + '90': 'Squeeze', + '91': 'StridedSlice', + '92': 'StringJoin', + '93': 'StringSplit', + '94': 'StringToNumber', + '95': 'TanH', + '96': 'TfQuantizedConv2D', + '97': 'Threshold', + '98': 'Tile', + '99': 'TopKV2', + '100': 'Transpose', + '101': 'UnaryOp', + '102': 'Unpack', + '103': 'Where', + '104': 'Moments', + '105': 'RNNSequenceGRU', + '106': 'BatchMatMul', + '107': 'Unsqueeze', + '108': 'CosineSimilarity', + '109': 'DepthToSpace', + '110': 'SpaceToDepth', + '111': 'ReverseSequence', + '112': 'Pooling3D', + '113': 'Convolution3D', + '114': 'MatrixBandPart', + '115': 'GatherND', + '116': 'DetectionPostProcess', + '117': 'UnravelIndex', + '118': 'ScatterNd', + '119': 'OneHot', + '120': 'BroadcastTo', + '121': 'Dilation2D', + '128': 'MaxLayerCount', + '129': 'ConvertTensor', + '130': 'ArgMin', + '131': 'LinSpace', + '256': 'Plugin', + '257': 'Select', + '258': 'ZerosLike', + '259': 'Broastcast', + '260': 'SetDiff1D', + '261': 'ReluGrad', + '262': 'Relu6Grad', + '263': 'PoolGrad', + '264': 'SoftmaxGrad', + '265': 'Conv2DBackPropFilter', + '266': 'TrainableParam', + '267': 'BatchNorm', + '268': 'ZeroGrad', + '512': 'Extra', + '513': 'ConvInt8', + '514': 'Int8ToFloat', + '515': 'DepthwiseConvInt8', + '516': 'PoolInt8', + '517': 'FloatToInt8', + '518': 'EltwiseInt8' +}; + +/** + * @enum {number} + */ +MNN.OpParameter = { + NONE: 0, + QuantizedAdd: 1, + ArgMax: 2, + AsString: 3, + Axis: 4, + BatchNorm: 5, + BinaryOp: 6, + Blob: 7, + CastParam: 8, + Convolution2D: 9, + Crop: 10, + CropAndResize: 11, + Dequantize: 12, + DetectionOutput: 13, + Eltwise: 14, + ExpandDims: 15, + Fill: 
16, + Flatten: 17, + Gather: 18, + GatherV2: 19, + InnerProduct: 20, + Input: 21, + Interp: 22, + LRN: 23, + LSTM: 24, + MatMul: 25, + NonMaxSuppressionV2: 26, + Normalize: 27, + PackParam: 28, + Permute: 29, + Plugin: 30, + Pool: 31, + PRelu: 32, + PriorBox: 33, + Proposal: 34, + QuantizedAvgPool: 35, + QuantizedBiasAdd: 36, + QuantizedConcat: 37, + QuantizedLogistic: 38, + QuantizedMatMul: 39, + QuantizedMaxPool: 40, + QuantizedRelu: 41, + QuantizedRelu6: 42, + QuantizedReshape: 43, + QuantizedSoftmax: 44, + QuantizeMaxMin: 45, + QuantizeV2: 46, + Range: 47, + Rank: 48, + ReduceJoin: 49, + ReductionParam: 50, + Relu: 51, + Relu6: 52, + RequantizationRange: 53, + Requantize: 54, + Reshape: 55, + Resize: 56, + RoiPooling: 57, + Scale: 58, + Selu: 59, + Size: 60, + Slice: 61, + SliceTf: 62, + SpaceBatch: 63, + SqueezeParam: 64, + StridedSliceParam: 65, + TensorConvertInfo: 66, + TfQuantizedConv2D: 67, + TopKV2: 68, + Transpose: 69, + UnaryOp: 70, + MomentsParam: 71, + RNNParam: 72, + BatchMatMulParam: 73, + QuantizedFloatParam: 74, + DepthSpaceParam: 75, + EltwiseInt8: 76, + ReverseSequenceParam: 77, + Extra: 78, + Pool3D: 79, + Convolution3D: 80, + ELU: 81, + DetectionPostProcessParam: 82, + OneHotParam: 83, + PadParam: 84 +}; + +/** + * @enum {string} + */ +MNN.OpParameterName = { + '0': 'NONE', + '1': 'QuantizedAdd', + '2': 'ArgMax', + '3': 'AsString', + '4': 'Axis', + '5': 'BatchNorm', + '6': 'BinaryOp', + '7': 'Blob', + '8': 'CastParam', + '9': 'Convolution2D', + '10': 'Crop', + '11': 'CropAndResize', + '12': 'Dequantize', + '13': 'DetectionOutput', + '14': 'Eltwise', + '15': 'ExpandDims', + '16': 'Fill', + '17': 'Flatten', + '18': 'Gather', + '19': 'GatherV2', + '20': 'InnerProduct', + '21': 'Input', + '22': 'Interp', + '23': 'LRN', + '24': 'LSTM', + '25': 'MatMul', + '26': 'NonMaxSuppressionV2', + '27': 'Normalize', + '28': 'PackParam', + '29': 'Permute', + '30': 'Plugin', + '31': 'Pool', + '32': 'PRelu', + '33': 'PriorBox', + '34': 'Proposal', + '35': 
'QuantizedAvgPool', + '36': 'QuantizedBiasAdd', + '37': 'QuantizedConcat', + '38': 'QuantizedLogistic', + '39': 'QuantizedMatMul', + '40': 'QuantizedMaxPool', + '41': 'QuantizedRelu', + '42': 'QuantizedRelu6', + '43': 'QuantizedReshape', + '44': 'QuantizedSoftmax', + '45': 'QuantizeMaxMin', + '46': 'QuantizeV2', + '47': 'Range', + '48': 'Rank', + '49': 'ReduceJoin', + '50': 'ReductionParam', + '51': 'Relu', + '52': 'Relu6', + '53': 'RequantizationRange', + '54': 'Requantize', + '55': 'Reshape', + '56': 'Resize', + '57': 'RoiPooling', + '58': 'Scale', + '59': 'Selu', + '60': 'Size', + '61': 'Slice', + '62': 'SliceTf', + '63': 'SpaceBatch', + '64': 'SqueezeParam', + '65': 'StridedSliceParam', + '66': 'TensorConvertInfo', + '67': 'TfQuantizedConv2D', + '68': 'TopKV2', + '69': 'Transpose', + '70': 'UnaryOp', + '71': 'MomentsParam', + '72': 'RNNParam', + '73': 'BatchMatMulParam', + '74': 'QuantizedFloatParam', + '75': 'DepthSpaceParam', + '76': 'EltwiseInt8', + '77': 'ReverseSequenceParam', + '78': 'Extra', + '79': 'Pool3D', + '80': 'Convolution3D', + '81': 'ELU', + '82': 'DetectionPostProcessParam', + '83': 'OneHotParam', + '84': 'PadParam' +}; + +/** + * @enum {number} + */ +MNN.ForwardType = { + CPU: 0, + METAL: 1, + OPENCL: 2, + OPENGLES: 3, + VULKAN: 4 +}; + +/** + * @enum {string} + */ +MNN.ForwardTypeName = { + '0': 'CPU', + '1': 'METAL', + '2': 'OPENCL', + '3': 'OPENGLES', + '4': 'VULKAN' +}; + +/** + * @enum {number} + */ +MNN.Usage = { + INFERENCE: 0, + TRAIN: 1 +}; + +/** + * @enum {string} + */ +MNN.UsageName = { + '0': 'INFERENCE', + '1': 'TRAIN' +}; + +/** + * @constructor + */ +MNN.Blob = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Blob} + */ +MNN.Blob.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} 
bb + * @param {MNN.Blob=} obj + * @returns {MNN.Blob} + */ +MNN.Blob.getRootAsBlob = function(bb, obj) { + return (obj || new MNN.Blob).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Blob=} obj + * @returns {MNN.Blob} + */ +MNN.Blob.getSizePrefixedRootAsBlob = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Blob).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Blob.prototype.dims = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Blob.prototype.dimsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Blob.prototype.dimsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.MNN_DATA_FORMAT} + */ +MNN.Blob.prototype.dataFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.MNN_DATA_FORMAT} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.MNN_DATA_FORMAT.NCHW; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Blob.prototype.dataType = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Blob.prototype.uint8s = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.readUint8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.Blob.prototype.uint8sLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint8Array} + */ +MNN.Blob.prototype.uint8sArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Uint8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Blob.prototype.int8s = function(index) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.Blob.prototype.int8sLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +MNN.Blob.prototype.int8sArray = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Blob.prototype.int32s = function(index) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Blob.prototype.int32sLength = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Blob.prototype.int32sArray = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {flatbuffers.Long} + */ +MNN.Blob.prototype.int64s = function(index) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt64(this.bb.__vector(this.bb_pos + offset) + index * 8) : this.bb.createLong(0, 0); +}; + +/** + * @returns {number} + */ +MNN.Blob.prototype.int64sLength = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Blob.prototype.float32s = function(index) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Blob.prototype.float32sLength = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Blob.prototype.float32sArray = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array} + */ +MNN.Blob.prototype.strings = function(index, optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.__string(this.bb.__vector(this.bb_pos + offset) + index * 4, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +MNN.Blob.prototype.stringsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Blob.startBlob = function(builder) { + builder.startObject(9); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + */ +MNN.Blob.addDims = function(builder, dimsOffset) { + builder.addFieldOffset(0, dimsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createDimsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Blob.startDimsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.MNN_DATA_FORMAT} dataFormat + */ +MNN.Blob.addDataFormat = function(builder, dataFormat) { + builder.addFieldInt8(1, dataFormat, MNN.MNN_DATA_FORMAT.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dataType + */ +MNN.Blob.addDataType = function(builder, dataType) { + builder.addFieldInt32(2, dataType, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} uint8sOffset + */ +MNN.Blob.addUint8s = function(builder, uint8sOffset) { + builder.addFieldOffset(3, uint8sOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createUint8sVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Blob.startUint8sVector = function(builder, numElems) { + 
builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} int8sOffset + */ +MNN.Blob.addInt8s = function(builder, int8sOffset) { + builder.addFieldOffset(4, int8sOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createInt8sVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Blob.startInt8sVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} int32sOffset + */ +MNN.Blob.addInt32s = function(builder, int32sOffset) { + builder.addFieldOffset(5, int32sOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createInt32sVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Blob.startInt32sVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} int64sOffset + */ +MNN.Blob.addInt64s = function(builder, int64sOffset) { + builder.addFieldOffset(6, int64sOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createInt64sVector = function(builder, data) { + builder.startVector(8, data.length, 8); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt64(data[i]); + } + return builder.endVector(); +}; + +/** + 
* @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Blob.startInt64sVector = function(builder, numElems) { + builder.startVector(8, numElems, 8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} float32sOffset + */ +MNN.Blob.addFloat32s = function(builder, float32sOffset) { + builder.addFieldOffset(7, float32sOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createFloat32sVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Blob.startFloat32sVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} stringsOffset + */ +MNN.Blob.addStrings = function(builder, stringsOffset) { + builder.addFieldOffset(8, stringsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createStringsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Blob.startStringsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Blob.endBlob = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + * @param {MNN.MNN_DATA_FORMAT} dataFormat + * @param {MNN.DataType} dataType + * @param 
{flatbuffers.Offset} uint8sOffset + * @param {flatbuffers.Offset} int8sOffset + * @param {flatbuffers.Offset} int32sOffset + * @param {flatbuffers.Offset} int64sOffset + * @param {flatbuffers.Offset} float32sOffset + * @param {flatbuffers.Offset} stringsOffset + * @returns {flatbuffers.Offset} + */ +MNN.Blob.createBlob = function(builder, dimsOffset, dataFormat, dataType, uint8sOffset, int8sOffset, int32sOffset, int64sOffset, float32sOffset, stringsOffset) { + MNN.Blob.startBlob(builder); + MNN.Blob.addDims(builder, dimsOffset); + MNN.Blob.addDataFormat(builder, dataFormat); + MNN.Blob.addDataType(builder, dataType); + MNN.Blob.addUint8s(builder, uint8sOffset); + MNN.Blob.addInt8s(builder, int8sOffset); + MNN.Blob.addInt32s(builder, int32sOffset); + MNN.Blob.addInt64s(builder, int64sOffset); + MNN.Blob.addFloat32s(builder, float32sOffset); + MNN.Blob.addStrings(builder, stringsOffset); + return MNN.Blob.endBlob(builder); +} + +/** + * @constructor + */ +MNN.ListValue = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.ListValue} + */ +MNN.ListValue.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ListValue=} obj + * @returns {MNN.ListValue} + */ +MNN.ListValue.getRootAsListValue = function(bb, obj) { + return (obj || new MNN.ListValue).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ListValue=} obj + * @returns {MNN.ListValue} + */ +MNN.ListValue.getSizePrefixedRootAsListValue = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.ListValue).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @param {flatbuffers.Encoding=} 
optionalEncoding + * @returns {string|Uint8Array} + */ +MNN.ListValue.prototype.s = function(index, optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb.__vector(this.bb_pos + offset) + index * 4, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +MNN.ListValue.prototype.sLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.ListValue.prototype.i = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.ListValue.prototype.iLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.ListValue.prototype.iArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.ListValue.prototype.f = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.ListValue.prototype.fLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.ListValue.prototype.fArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {boolean} + */ +MNN.ListValue.prototype.b = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : false; +}; + +/** + * @returns {number} + */ +MNN.ListValue.prototype.bLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +MNN.ListValue.prototype.bArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {MNN.DataType} + */ +MNN.ListValue.prototype.type = function(index) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4)) : /** @type {MNN.DataType} */ (0); +}; + +/** + * @returns {number} + */ +MNN.ListValue.prototype.typeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.ListValue.prototype.typeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.ListValue.startListValue = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} sOffset + */ +MNN.ListValue.addS = function(builder, sOffset) { + builder.addFieldOffset(0, sOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.ListValue.createSVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.ListValue.startSVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} iOffset + */ +MNN.ListValue.addI = function(builder, iOffset) { + builder.addFieldOffset(1, iOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.ListValue.createIVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.ListValue.startIVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} fOffset + */ +MNN.ListValue.addF = function(builder, fOffset) { + builder.addFieldOffset(2, fOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ 
+MNN.ListValue.createFVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.ListValue.startFVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bOffset + */ +MNN.ListValue.addB = function(builder, bOffset) { + builder.addFieldOffset(3, bOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.ListValue.createBVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(+data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.ListValue.startBVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} typeOffset + */ +MNN.ListValue.addType = function(builder, typeOffset) { + builder.addFieldOffset(4, typeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.ListValue.createTypeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.ListValue.startTypeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.ListValue.endListValue = function(builder) { + var offset = builder.endObject(); + return 
offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} sOffset + * @param {flatbuffers.Offset} iOffset + * @param {flatbuffers.Offset} fOffset + * @param {flatbuffers.Offset} bOffset + * @param {flatbuffers.Offset} typeOffset + * @returns {flatbuffers.Offset} + */ +MNN.ListValue.createListValue = function(builder, sOffset, iOffset, fOffset, bOffset, typeOffset) { + MNN.ListValue.startListValue(builder); + MNN.ListValue.addS(builder, sOffset); + MNN.ListValue.addI(builder, iOffset); + MNN.ListValue.addF(builder, fOffset); + MNN.ListValue.addB(builder, bOffset); + MNN.ListValue.addType(builder, typeOffset); + return MNN.ListValue.endListValue(builder); +} + +/** + * @constructor + */ +MNN.Attribute = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Attribute} + */ +MNN.Attribute.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Attribute=} obj + * @returns {MNN.Attribute} + */ +MNN.Attribute.getRootAsAttribute = function(bb, obj) { + return (obj || new MNN.Attribute).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Attribute=} obj + * @returns {MNN.Attribute} + */ +MNN.Attribute.getSizePrefixedRootAsAttribute = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Attribute).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.Attribute.prototype.s = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +MNN.Attribute.prototype.i = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.Attribute.prototype.b = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.Attribute.prototype.key = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Attribute.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {number} + */ +MNN.Attribute.prototype.f = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.Attribute.prototype.tensor = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.ListValue=} obj + * @returns {MNN.ListValue|null} + */ +MNN.Attribute.prototype.list = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
(obj || new MNN.ListValue).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Attribute.startAttribute = function(builder) { + builder.startObject(8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} sOffset + */ +MNN.Attribute.addS = function(builder, sOffset) { + builder.addFieldOffset(0, sOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} i + */ +MNN.Attribute.addI = function(builder, i) { + builder.addFieldInt32(1, i, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} b + */ +MNN.Attribute.addB = function(builder, b) { + builder.addFieldInt8(2, +b, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} keyOffset + */ +MNN.Attribute.addKey = function(builder, keyOffset) { + builder.addFieldOffset(3, keyOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + */ +MNN.Attribute.addType = function(builder, type) { + builder.addFieldInt32(4, type, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} f + */ +MNN.Attribute.addF = function(builder, f) { + builder.addFieldFloat32(5, f, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} tensorOffset + */ +MNN.Attribute.addTensor = function(builder, tensorOffset) { + builder.addFieldOffset(6, tensorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} listOffset + */ +MNN.Attribute.addList = function(builder, listOffset) { + builder.addFieldOffset(7, listOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Attribute.endAttribute = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} sOffset + * 
@param {number} i + * @param {boolean} b + * @param {flatbuffers.Offset} keyOffset + * @param {MNN.DataType} type + * @param {number} f + * @param {flatbuffers.Offset} tensorOffset + * @param {flatbuffers.Offset} listOffset + * @returns {flatbuffers.Offset} + */ +MNN.Attribute.createAttribute = function(builder, sOffset, i, b, keyOffset, type, f, tensorOffset, listOffset) { + MNN.Attribute.startAttribute(builder); + MNN.Attribute.addS(builder, sOffset); + MNN.Attribute.addI(builder, i); + MNN.Attribute.addB(builder, b); + MNN.Attribute.addKey(builder, keyOffset); + MNN.Attribute.addType(builder, type); + MNN.Attribute.addF(builder, f); + MNN.Attribute.addTensor(builder, tensorOffset); + MNN.Attribute.addList(builder, listOffset); + return MNN.Attribute.endAttribute(builder); +} + +/** + * @constructor + */ +MNN.Convolution2DCommon = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Convolution2DCommon} + */ +MNN.Convolution2DCommon.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution2DCommon=} obj + * @returns {MNN.Convolution2DCommon} + */ +MNN.Convolution2DCommon.getRootAsConvolution2DCommon = function(bb, obj) { + return (obj || new MNN.Convolution2DCommon).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution2DCommon=} obj + * @returns {MNN.Convolution2DCommon} + */ +MNN.Convolution2DCommon.getSizePrefixedRootAsConvolution2DCommon = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Convolution2DCommon).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.padX = function() 
{ + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.padY = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.kernelX = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.kernelY = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.dilateX = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.dilateY = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {MNN.PadMode} + */ +MNN.Convolution2DCommon.prototype.padMode = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? /** @type {MNN.PadMode} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PadMode.CAFFE; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.group = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.outputCount = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.inputCount = function() { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.Convolution2DCommon.prototype.relu = function() { + var offset = this.bb.__offset(this.bb_pos, 28); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.Convolution2DCommon.prototype.relu6 = function() { + var offset = this.bb.__offset(this.bb_pos, 30); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.pads = function(index) { + var offset = this.bb.__offset(this.bb_pos, 32); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution2DCommon.prototype.padsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 32); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Convolution2DCommon.prototype.padsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 32); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Convolution2DCommon.startConvolution2DCommon = function(builder) { + builder.startObject(15); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padX + */ +MNN.Convolution2DCommon.addPadX = function(builder, padX) { + builder.addFieldInt32(0, padX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padY + */ +MNN.Convolution2DCommon.addPadY = function(builder, padY) { + builder.addFieldInt32(1, padY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelX + */ +MNN.Convolution2DCommon.addKernelX = function(builder, kernelX) { + builder.addFieldInt32(2, kernelX, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelY + */ +MNN.Convolution2DCommon.addKernelY = function(builder, kernelY) { + builder.addFieldInt32(3, kernelY, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +MNN.Convolution2DCommon.addStrideX = function(builder, strideX) { + builder.addFieldInt32(4, strideX, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +MNN.Convolution2DCommon.addStrideY = function(builder, strideY) { + builder.addFieldInt32(5, strideY, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilateX + */ +MNN.Convolution2DCommon.addDilateX = function(builder, dilateX) { + builder.addFieldInt32(6, dilateX, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilateY + */ +MNN.Convolution2DCommon.addDilateY = function(builder, dilateY) { + builder.addFieldInt32(7, dilateY, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PadMode} padMode + */ +MNN.Convolution2DCommon.addPadMode = function(builder, padMode) { + 
builder.addFieldInt8(8, padMode, MNN.PadMode.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} group + */ +MNN.Convolution2DCommon.addGroup = function(builder, group) { + builder.addFieldInt32(9, group, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputCount + */ +MNN.Convolution2DCommon.addOutputCount = function(builder, outputCount) { + builder.addFieldInt32(10, outputCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} inputCount + */ +MNN.Convolution2DCommon.addInputCount = function(builder, inputCount) { + builder.addFieldInt32(11, inputCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} relu + */ +MNN.Convolution2DCommon.addRelu = function(builder, relu) { + builder.addFieldInt8(12, +relu, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} relu6 + */ +MNN.Convolution2DCommon.addRelu6 = function(builder, relu6) { + builder.addFieldInt8(13, +relu6, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} padsOffset + */ +MNN.Convolution2DCommon.addPads = function(builder, padsOffset) { + builder.addFieldOffset(14, padsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution2DCommon.createPadsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution2DCommon.startPadsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Convolution2DCommon.endConvolution2DCommon = function(builder) { + var offset = builder.endObject(); + return offset; +}; + 
+/** + * @param {flatbuffers.Builder} builder + * @param {number} padX + * @param {number} padY + * @param {number} kernelX + * @param {number} kernelY + * @param {number} strideX + * @param {number} strideY + * @param {number} dilateX + * @param {number} dilateY + * @param {MNN.PadMode} padMode + * @param {number} group + * @param {number} outputCount + * @param {number} inputCount + * @param {boolean} relu + * @param {boolean} relu6 + * @param {flatbuffers.Offset} padsOffset + * @returns {flatbuffers.Offset} + */ +MNN.Convolution2DCommon.createConvolution2DCommon = function(builder, padX, padY, kernelX, kernelY, strideX, strideY, dilateX, dilateY, padMode, group, outputCount, inputCount, relu, relu6, padsOffset) { + MNN.Convolution2DCommon.startConvolution2DCommon(builder); + MNN.Convolution2DCommon.addPadX(builder, padX); + MNN.Convolution2DCommon.addPadY(builder, padY); + MNN.Convolution2DCommon.addKernelX(builder, kernelX); + MNN.Convolution2DCommon.addKernelY(builder, kernelY); + MNN.Convolution2DCommon.addStrideX(builder, strideX); + MNN.Convolution2DCommon.addStrideY(builder, strideY); + MNN.Convolution2DCommon.addDilateX(builder, dilateX); + MNN.Convolution2DCommon.addDilateY(builder, dilateY); + MNN.Convolution2DCommon.addPadMode(builder, padMode); + MNN.Convolution2DCommon.addGroup(builder, group); + MNN.Convolution2DCommon.addOutputCount(builder, outputCount); + MNN.Convolution2DCommon.addInputCount(builder, inputCount); + MNN.Convolution2DCommon.addRelu(builder, relu); + MNN.Convolution2DCommon.addRelu6(builder, relu6); + MNN.Convolution2DCommon.addPads(builder, padsOffset); + return MNN.Convolution2DCommon.endConvolution2DCommon(builder); +} + +/** + * @constructor + */ +MNN.Convolution3DCommon = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Convolution3DCommon} + */ 
+MNN.Convolution3DCommon.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution3DCommon=} obj + * @returns {MNN.Convolution3DCommon} + */ +MNN.Convolution3DCommon.getRootAsConvolution3DCommon = function(bb, obj) { + return (obj || new MNN.Convolution3DCommon).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution3DCommon=} obj + * @returns {MNN.Convolution3DCommon} + */ +MNN.Convolution3DCommon.getSizePrefixedRootAsConvolution3DCommon = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Convolution3DCommon).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.dilates = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.dilatesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Convolution3DCommon.prototype.dilatesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.strides = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.stridesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Convolution3DCommon.prototype.stridesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.kernels = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.kernelsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Convolution3DCommon.prototype.kernelsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.pads = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.padsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Convolution3DCommon.prototype.padsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.PadMode} + */ +MNN.Convolution3DCommon.prototype.padMode = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {MNN.PadMode} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PadMode.CAFFE; +}; + +/** + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.inputCount = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution3DCommon.prototype.outputCount = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.Convolution3DCommon.prototype.relu = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.Convolution3DCommon.prototype.relu6 = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Convolution3DCommon.startConvolution3DCommon = function(builder) { + builder.startObject(9); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dilatesOffset + */ +MNN.Convolution3DCommon.addDilates = function(builder, dilatesOffset) { + builder.addFieldOffset(0, dilatesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3DCommon.createDilatesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution3DCommon.startDilatesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} stridesOffset + */ +MNN.Convolution3DCommon.addStrides = function(builder, stridesOffset) { + builder.addFieldOffset(1, stridesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3DCommon.createStridesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution3DCommon.startStridesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} kernelsOffset + */ +MNN.Convolution3DCommon.addKernels = function(builder, kernelsOffset) { + builder.addFieldOffset(2, kernelsOffset, 0); +}; + +/** + * @param 
{flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3DCommon.createKernelsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution3DCommon.startKernelsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} padsOffset + */ +MNN.Convolution3DCommon.addPads = function(builder, padsOffset) { + builder.addFieldOffset(3, padsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3DCommon.createPadsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution3DCommon.startPadsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PadMode} padMode + */ +MNN.Convolution3DCommon.addPadMode = function(builder, padMode) { + builder.addFieldInt8(4, padMode, MNN.PadMode.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} inputCount + */ +MNN.Convolution3DCommon.addInputCount = function(builder, inputCount) { + builder.addFieldInt32(5, inputCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputCount + */ +MNN.Convolution3DCommon.addOutputCount = function(builder, outputCount) { + builder.addFieldInt32(6, outputCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} relu + */ 
+MNN.Convolution3DCommon.addRelu = function(builder, relu) { + builder.addFieldInt8(7, +relu, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} relu6 + */ +MNN.Convolution3DCommon.addRelu6 = function(builder, relu6) { + builder.addFieldInt8(8, +relu6, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3DCommon.endConvolution3DCommon = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dilatesOffset + * @param {flatbuffers.Offset} stridesOffset + * @param {flatbuffers.Offset} kernelsOffset + * @param {flatbuffers.Offset} padsOffset + * @param {MNN.PadMode} padMode + * @param {number} inputCount + * @param {number} outputCount + * @param {boolean} relu + * @param {boolean} relu6 + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3DCommon.createConvolution3DCommon = function(builder, dilatesOffset, stridesOffset, kernelsOffset, padsOffset, padMode, inputCount, outputCount, relu, relu6) { + MNN.Convolution3DCommon.startConvolution3DCommon(builder); + MNN.Convolution3DCommon.addDilates(builder, dilatesOffset); + MNN.Convolution3DCommon.addStrides(builder, stridesOffset); + MNN.Convolution3DCommon.addKernels(builder, kernelsOffset); + MNN.Convolution3DCommon.addPads(builder, padsOffset); + MNN.Convolution3DCommon.addPadMode(builder, padMode); + MNN.Convolution3DCommon.addInputCount(builder, inputCount); + MNN.Convolution3DCommon.addOutputCount(builder, outputCount); + MNN.Convolution3DCommon.addRelu(builder, relu); + MNN.Convolution3DCommon.addRelu6(builder, relu6); + return MNN.Convolution3DCommon.endConvolution3DCommon(builder); +} + +/** + * @constructor + */ +MNN.IDSTQuan = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param 
{flatbuffers.ByteBuffer} bb + * @returns {MNN.IDSTQuan} + */ +MNN.IDSTQuan.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.IDSTQuan=} obj + * @returns {MNN.IDSTQuan} + */ +MNN.IDSTQuan.getRootAsIDSTQuan = function(bb, obj) { + return (obj || new MNN.IDSTQuan).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.IDSTQuan=} obj + * @returns {MNN.IDSTQuan} + */ +MNN.IDSTQuan.getSizePrefixedRootAsIDSTQuan = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.IDSTQuan).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.IDSTQuan.prototype.buffer = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.bufferLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +MNN.IDSTQuan.prototype.bufferArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.IDSTQuan.prototype.alpha = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.alphaLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.IDSTQuan.prototype.alphaArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.IDSTQuan.prototype.useInt32 = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.quantScale = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.scaleIn = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.scaleOut = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.aMax = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.aMin = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.IDSTQuan.prototype.readType = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.IDSTQuan.prototype.hasScaleInt = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.IDSTQuan.startIDSTQuan = function(builder) { + builder.startObject(11); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bufferOffset + */ +MNN.IDSTQuan.addBuffer = function(builder, bufferOffset) { + builder.addFieldOffset(0, bufferOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.IDSTQuan.createBufferVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.IDSTQuan.startBufferVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} alphaOffset + */ +MNN.IDSTQuan.addAlpha = function(builder, alphaOffset) { + builder.addFieldOffset(1, alphaOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.IDSTQuan.createAlphaVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.IDSTQuan.startAlphaVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} type + */ +MNN.IDSTQuan.addType = function(builder, type) { + builder.addFieldInt32(2, type, 
0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} useInt32 + */ +MNN.IDSTQuan.addUseInt32 = function(builder, useInt32) { + builder.addFieldInt8(3, +useInt32, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} quantScale + */ +MNN.IDSTQuan.addQuantScale = function(builder, quantScale) { + builder.addFieldFloat32(4, quantScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scaleIn + */ +MNN.IDSTQuan.addScaleIn = function(builder, scaleIn) { + builder.addFieldFloat32(5, scaleIn, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scaleOut + */ +MNN.IDSTQuan.addScaleOut = function(builder, scaleOut) { + builder.addFieldFloat32(6, scaleOut, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} aMax + */ +MNN.IDSTQuan.addAMax = function(builder, aMax) { + builder.addFieldInt32(7, aMax, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} aMin + */ +MNN.IDSTQuan.addAMin = function(builder, aMin) { + builder.addFieldInt32(8, aMin, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} readType + */ +MNN.IDSTQuan.addReadType = function(builder, readType) { + builder.addFieldInt32(9, readType, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} hasScaleInt + */ +MNN.IDSTQuan.addHasScaleInt = function(builder, hasScaleInt) { + builder.addFieldInt8(10, +hasScaleInt, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.IDSTQuan.endIDSTQuan = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bufferOffset + * @param {flatbuffers.Offset} alphaOffset + * @param {number} type + * @param {boolean} useInt32 + * @param {number} quantScale + * @param {number} scaleIn + * @param {number} scaleOut + * @param {number} aMax + * 
@param {number} aMin + * @param {number} readType + * @param {boolean} hasScaleInt + * @returns {flatbuffers.Offset} + */ +MNN.IDSTQuan.createIDSTQuan = function(builder, bufferOffset, alphaOffset, type, useInt32, quantScale, scaleIn, scaleOut, aMax, aMin, readType, hasScaleInt) { + MNN.IDSTQuan.startIDSTQuan(builder); + MNN.IDSTQuan.addBuffer(builder, bufferOffset); + MNN.IDSTQuan.addAlpha(builder, alphaOffset); + MNN.IDSTQuan.addType(builder, type); + MNN.IDSTQuan.addUseInt32(builder, useInt32); + MNN.IDSTQuan.addQuantScale(builder, quantScale); + MNN.IDSTQuan.addScaleIn(builder, scaleIn); + MNN.IDSTQuan.addScaleOut(builder, scaleOut); + MNN.IDSTQuan.addAMax(builder, aMax); + MNN.IDSTQuan.addAMin(builder, aMin); + MNN.IDSTQuan.addReadType(builder, readType); + MNN.IDSTQuan.addHasScaleInt(builder, hasScaleInt); + return MNN.IDSTQuan.endIDSTQuan(builder); +} + +/** + * @constructor + */ +MNN.QuantizedFloatParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedFloatParam} + */ +MNN.QuantizedFloatParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedFloatParam=} obj + * @returns {MNN.QuantizedFloatParam} + */ +MNN.QuantizedFloatParam.getRootAsQuantizedFloatParam = function(bb, obj) { + return (obj || new MNN.QuantizedFloatParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedFloatParam=} obj + * @returns {MNN.QuantizedFloatParam} + */ +MNN.QuantizedFloatParam.getSizePrefixedRootAsQuantizedFloatParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedFloatParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + 
+/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.weight = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.weightLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +MNN.QuantizedFloatParam.prototype.weightArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.bias = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.biasLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.QuantizedFloatParam.prototype.biasArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.scale = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.scaleLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.QuantizedFloatParam.prototype.scaleArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.tensorScale = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedFloatParam.prototype.tensorScaleLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.QuantizedFloatParam.prototype.tensorScaleArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.QuantizeAlgo} + */ +MNN.QuantizedFloatParam.prototype.method = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
/** @type {MNN.QuantizeAlgo} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.QuantizeAlgo.DEFAULT; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedFloatParam.startQuantizedFloatParam = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightOffset + */ +MNN.QuantizedFloatParam.addWeight = function(builder, weightOffset) { + builder.addFieldOffset(0, weightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedFloatParam.createWeightVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedFloatParam.startWeightVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.QuantizedFloatParam.addBias = function(builder, biasOffset) { + builder.addFieldOffset(1, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedFloatParam.createBiasVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedFloatParam.startBiasVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} scaleOffset + */ +MNN.QuantizedFloatParam.addScale = function(builder, scaleOffset) { + builder.addFieldOffset(2, scaleOffset, 0); +}; + +/** + * @param 
{flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedFloatParam.createScaleVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedFloatParam.startScaleVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} tensorScaleOffset + */ +MNN.QuantizedFloatParam.addTensorScale = function(builder, tensorScaleOffset) { + builder.addFieldOffset(3, tensorScaleOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedFloatParam.createTensorScaleVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedFloatParam.startTensorScaleVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.QuantizeAlgo} method + */ +MNN.QuantizedFloatParam.addMethod = function(builder, method) { + builder.addFieldInt8(4, method, MNN.QuantizeAlgo.DEFAULT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedFloatParam.endQuantizedFloatParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightOffset + * @param {flatbuffers.Offset} biasOffset + * @param {flatbuffers.Offset} scaleOffset + * @param {flatbuffers.Offset} tensorScaleOffset + * @param 
{MNN.QuantizeAlgo} method + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedFloatParam.createQuantizedFloatParam = function(builder, weightOffset, biasOffset, scaleOffset, tensorScaleOffset, method) { + MNN.QuantizedFloatParam.startQuantizedFloatParam(builder); + MNN.QuantizedFloatParam.addWeight(builder, weightOffset); + MNN.QuantizedFloatParam.addBias(builder, biasOffset); + MNN.QuantizedFloatParam.addScale(builder, scaleOffset); + MNN.QuantizedFloatParam.addTensorScale(builder, tensorScaleOffset); + MNN.QuantizedFloatParam.addMethod(builder, method); + return MNN.QuantizedFloatParam.endQuantizedFloatParam(builder); +} + +/** + * @constructor + */ +MNN.Convolution2D = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Convolution2D} + */ +MNN.Convolution2D.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution2D=} obj + * @returns {MNN.Convolution2D} + */ +MNN.Convolution2D.getRootAsConvolution2D = function(bb, obj) { + return (obj || new MNN.Convolution2D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution2D=} obj + * @returns {MNN.Convolution2D} + */ +MNN.Convolution2D.getSizePrefixedRootAsConvolution2D = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Convolution2D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {MNN.Convolution2DCommon=} obj + * @returns {MNN.Convolution2DCommon|null} + */ +MNN.Convolution2D.prototype.common = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
(obj || new MNN.Convolution2DCommon).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution2D.prototype.weight = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution2D.prototype.weightLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Convolution2D.prototype.weightArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution2D.prototype.bias = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution2D.prototype.biasLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Convolution2D.prototype.biasArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {MNN.IDSTQuan=} obj + * @returns {MNN.IDSTQuan|null} + */ +MNN.Convolution2D.prototype.quanParameter = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new MNN.IDSTQuan).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.QuantizedFloatParam=} obj + * @returns {MNN.QuantizedFloatParam|null} + */ +MNN.Convolution2D.prototype.symmetricQuan = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new MNN.QuantizedFloatParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Convolution2D.startConvolution2D = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} commonOffset + */ +MNN.Convolution2D.addCommon = function(builder, commonOffset) { + builder.addFieldOffset(0, commonOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightOffset + */ +MNN.Convolution2D.addWeight = function(builder, weightOffset) { + builder.addFieldOffset(1, weightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution2D.createWeightVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution2D.startWeightVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.Convolution2D.addBias = function(builder, biasOffset) { + builder.addFieldOffset(2, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution2D.createBiasVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { 
+ builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution2D.startBiasVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} quanParameterOffset + */ +MNN.Convolution2D.addQuanParameter = function(builder, quanParameterOffset) { + builder.addFieldOffset(3, quanParameterOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} symmetricQuanOffset + */ +MNN.Convolution2D.addSymmetricQuan = function(builder, symmetricQuanOffset) { + builder.addFieldOffset(4, symmetricQuanOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Convolution2D.endConvolution2D = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} commonOffset + * @param {flatbuffers.Offset} weightOffset + * @param {flatbuffers.Offset} biasOffset + * @param {flatbuffers.Offset} quanParameterOffset + * @param {flatbuffers.Offset} symmetricQuanOffset + * @returns {flatbuffers.Offset} + */ +MNN.Convolution2D.createConvolution2D = function(builder, commonOffset, weightOffset, biasOffset, quanParameterOffset, symmetricQuanOffset) { + MNN.Convolution2D.startConvolution2D(builder); + MNN.Convolution2D.addCommon(builder, commonOffset); + MNN.Convolution2D.addWeight(builder, weightOffset); + MNN.Convolution2D.addBias(builder, biasOffset); + MNN.Convolution2D.addQuanParameter(builder, quanParameterOffset); + MNN.Convolution2D.addSymmetricQuan(builder, symmetricQuanOffset); + return MNN.Convolution2D.endConvolution2D(builder); +} + +/** + * @constructor + */ +MNN.Convolution3D = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + 
+/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Convolution3D} + */ +MNN.Convolution3D.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution3D=} obj + * @returns {MNN.Convolution3D} + */ +MNN.Convolution3D.getRootAsConvolution3D = function(bb, obj) { + return (obj || new MNN.Convolution3D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Convolution3D=} obj + * @returns {MNN.Convolution3D} + */ +MNN.Convolution3D.getSizePrefixedRootAsConvolution3D = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Convolution3D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {MNN.Convolution3DCommon=} obj + * @returns {MNN.Convolution3DCommon|null} + */ +MNN.Convolution3D.prototype.common = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new MNN.Convolution3DCommon).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution3D.prototype.weight = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution3D.prototype.weightLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Convolution3D.prototype.weightArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Convolution3D.prototype.bias = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Convolution3D.prototype.biasLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Convolution3D.prototype.biasArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Convolution3D.startConvolution3D = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} commonOffset + */ +MNN.Convolution3D.addCommon = function(builder, commonOffset) { + builder.addFieldOffset(0, commonOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightOffset + */ +MNN.Convolution3D.addWeight = function(builder, weightOffset) { + builder.addFieldOffset(1, weightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3D.createWeightVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution3D.startWeightVector = 
function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.Convolution3D.addBias = function(builder, biasOffset) { + builder.addFieldOffset(2, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3D.createBiasVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Convolution3D.startBiasVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3D.endConvolution3D = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} commonOffset + * @param {flatbuffers.Offset} weightOffset + * @param {flatbuffers.Offset} biasOffset + * @returns {flatbuffers.Offset} + */ +MNN.Convolution3D.createConvolution3D = function(builder, commonOffset, weightOffset, biasOffset) { + MNN.Convolution3D.startConvolution3D(builder); + MNN.Convolution3D.addCommon(builder, commonOffset); + MNN.Convolution3D.addWeight(builder, weightOffset); + MNN.Convolution3D.addBias(builder, biasOffset); + return MNN.Convolution3D.endConvolution3D(builder); +} + +/** + * @constructor + */ +MNN.InnerProduct = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.InnerProduct} + */ +MNN.InnerProduct.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * 
@param {flatbuffers.ByteBuffer} bb + * @param {MNN.InnerProduct=} obj + * @returns {MNN.InnerProduct} + */ +MNN.InnerProduct.getRootAsInnerProduct = function(bb, obj) { + return (obj || new MNN.InnerProduct).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.InnerProduct=} obj + * @returns {MNN.InnerProduct} + */ +MNN.InnerProduct.getSizePrefixedRootAsInnerProduct = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.InnerProduct).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.InnerProduct.prototype.outputCount = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.InnerProduct.prototype.biasTerm = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.InnerProduct.prototype.weightSize = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.InnerProduct.prototype.weight = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.InnerProduct.prototype.weightLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.InnerProduct.prototype.weightArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.InnerProduct.prototype.bias = function(index) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.InnerProduct.prototype.biasLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.InnerProduct.prototype.biasArray = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {number} + */ +MNN.InnerProduct.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.InnerProduct.prototype.transpose = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {MNN.IDSTQuan=} obj + * @returns {MNN.IDSTQuan|null} + */ +MNN.InnerProduct.prototype.quanParameter = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
(obj || new MNN.IDSTQuan).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.InnerProduct.startInnerProduct = function(builder) { + builder.startObject(8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputCount + */ +MNN.InnerProduct.addOutputCount = function(builder, outputCount) { + builder.addFieldInt32(0, outputCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} biasTerm + */ +MNN.InnerProduct.addBiasTerm = function(builder, biasTerm) { + builder.addFieldInt32(1, biasTerm, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} weightSize + */ +MNN.InnerProduct.addWeightSize = function(builder, weightSize) { + builder.addFieldInt32(2, weightSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightOffset + */ +MNN.InnerProduct.addWeight = function(builder, weightOffset) { + builder.addFieldOffset(3, weightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.InnerProduct.createWeightVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.InnerProduct.startWeightVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.InnerProduct.addBias = function(builder, biasOffset) { + builder.addFieldOffset(4, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.InnerProduct.createBiasVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = 
data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.InnerProduct.startBiasVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.InnerProduct.addAxis = function(builder, axis) { + builder.addFieldInt32(5, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} transpose + */ +MNN.InnerProduct.addTranspose = function(builder, transpose) { + builder.addFieldInt8(6, +transpose, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} quanParameterOffset + */ +MNN.InnerProduct.addQuanParameter = function(builder, quanParameterOffset) { + builder.addFieldOffset(7, quanParameterOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.InnerProduct.endInnerProduct = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputCount + * @param {number} biasTerm + * @param {number} weightSize + * @param {flatbuffers.Offset} weightOffset + * @param {flatbuffers.Offset} biasOffset + * @param {number} axis + * @param {boolean} transpose + * @param {flatbuffers.Offset} quanParameterOffset + * @returns {flatbuffers.Offset} + */ +MNN.InnerProduct.createInnerProduct = function(builder, outputCount, biasTerm, weightSize, weightOffset, biasOffset, axis, transpose, quanParameterOffset) { + MNN.InnerProduct.startInnerProduct(builder); + MNN.InnerProduct.addOutputCount(builder, outputCount); + MNN.InnerProduct.addBiasTerm(builder, biasTerm); + MNN.InnerProduct.addWeightSize(builder, weightSize); + MNN.InnerProduct.addWeight(builder, weightOffset); + MNN.InnerProduct.addBias(builder, biasOffset); + MNN.InnerProduct.addAxis(builder, axis); + 
MNN.InnerProduct.addTranspose(builder, transpose); + MNN.InnerProduct.addQuanParameter(builder, quanParameterOffset); + return MNN.InnerProduct.endInnerProduct(builder); +} + +/** + * @constructor + */ +MNN.Pool = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Pool} + */ +MNN.Pool.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Pool=} obj + * @returns {MNN.Pool} + */ +MNN.Pool.getRootAsPool = function(bb, obj) { + return (obj || new MNN.Pool).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Pool=} obj + * @returns {MNN.Pool} + */ +MNN.Pool.getSizePrefixedRootAsPool = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Pool).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Pool.prototype.padX = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool.prototype.padY = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.Pool.prototype.isGlobal = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {number} + */ +MNN.Pool.prototype.kernelX = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool.prototype.kernelY = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.PoolType} + */ +MNN.Pool.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? /** @type {MNN.PoolType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PoolType.MAXPOOL; +}; + +/** + * @returns {MNN.PoolPadType} + */ +MNN.Pool.prototype.padType = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? /** @type {MNN.PoolPadType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PoolPadType.CAFFE; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Pool.prototype.dataType = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @returns {boolean} + */ +MNN.Pool.prototype.ceilModel = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : true; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Pool.prototype.pads = function(index) { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool.prototype.padsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Pool.prototype.padsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Pool.startPool = function(builder) { + builder.startObject(12); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padX + */ +MNN.Pool.addPadX = function(builder, padX) { + builder.addFieldInt32(0, padX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padY + */ +MNN.Pool.addPadY = function(builder, padY) { + builder.addFieldInt32(1, padY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} isGlobal + */ +MNN.Pool.addIsGlobal = function(builder, isGlobal) { + builder.addFieldInt8(2, +isGlobal, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelX + */ +MNN.Pool.addKernelX = function(builder, kernelX) { + builder.addFieldInt32(3, kernelX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelY + */ +MNN.Pool.addKernelY = function(builder, kernelY) { + builder.addFieldInt32(4, kernelY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +MNN.Pool.addStrideX = function(builder, strideX) { + builder.addFieldInt32(5, strideX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +MNN.Pool.addStrideY = function(builder, strideY) { + builder.addFieldInt32(6, strideY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PoolType} type + */ +MNN.Pool.addType = function(builder, type) { + builder.addFieldInt8(7, type, MNN.PoolType.MAXPOOL); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PoolPadType} padType + */ 
+MNN.Pool.addPadType = function(builder, padType) { + builder.addFieldInt8(8, padType, MNN.PoolPadType.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dataType + */ +MNN.Pool.addDataType = function(builder, dataType) { + builder.addFieldInt32(9, dataType, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} ceilModel + */ +MNN.Pool.addCeilModel = function(builder, ceilModel) { + builder.addFieldInt8(10, +ceilModel, +true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} padsOffset + */ +MNN.Pool.addPads = function(builder, padsOffset) { + builder.addFieldOffset(11, padsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Pool.createPadsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Pool.startPadsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Pool.endPool = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padX + * @param {number} padY + * @param {boolean} isGlobal + * @param {number} kernelX + * @param {number} kernelY + * @param {number} strideX + * @param {number} strideY + * @param {MNN.PoolType} type + * @param {MNN.PoolPadType} padType + * @param {MNN.DataType} dataType + * @param {boolean} ceilModel + * @param {flatbuffers.Offset} padsOffset + * @returns {flatbuffers.Offset} + */ +MNN.Pool.createPool = function(builder, padX, padY, isGlobal, kernelX, kernelY, strideX, strideY, type, padType, dataType, ceilModel, 
padsOffset) { + MNN.Pool.startPool(builder); + MNN.Pool.addPadX(builder, padX); + MNN.Pool.addPadY(builder, padY); + MNN.Pool.addIsGlobal(builder, isGlobal); + MNN.Pool.addKernelX(builder, kernelX); + MNN.Pool.addKernelY(builder, kernelY); + MNN.Pool.addStrideX(builder, strideX); + MNN.Pool.addStrideY(builder, strideY); + MNN.Pool.addType(builder, type); + MNN.Pool.addPadType(builder, padType); + MNN.Pool.addDataType(builder, dataType); + MNN.Pool.addCeilModel(builder, ceilModel); + MNN.Pool.addPads(builder, padsOffset); + return MNN.Pool.endPool(builder); +} + +/** + * @constructor + */ +MNN.Pool3D = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Pool3D} + */ +MNN.Pool3D.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Pool3D=} obj + * @returns {MNN.Pool3D} + */ +MNN.Pool3D.getRootAsPool3D = function(bb, obj) { + return (obj || new MNN.Pool3D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Pool3D=} obj + * @returns {MNN.Pool3D} + */ +MNN.Pool3D.getSizePrefixedRootAsPool3D = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Pool3D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Pool3D.prototype.strides = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool3D.prototype.stridesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Pool3D.prototype.stridesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Pool3D.prototype.kernels = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool3D.prototype.kernelsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Pool3D.prototype.kernelsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Pool3D.prototype.pads = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Pool3D.prototype.padsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Pool3D.prototype.padsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.PoolType} + */ +MNN.Pool3D.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? /** @type {MNN.PoolType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PoolType.MAXPOOL; +}; + +/** + * @returns {MNN.PoolPadType} + */ +MNN.Pool3D.prototype.padType = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {MNN.PoolPadType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PoolPadType.CAFFE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Pool3D.startPool3D = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} stridesOffset + */ +MNN.Pool3D.addStrides = function(builder, stridesOffset) { + builder.addFieldOffset(0, stridesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Pool3D.createStridesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Pool3D.startStridesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} kernelsOffset + */ +MNN.Pool3D.addKernels = function(builder, kernelsOffset) { + builder.addFieldOffset(1, kernelsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Pool3D.createKernelsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + 
builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Pool3D.startKernelsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} padsOffset + */ +MNN.Pool3D.addPads = function(builder, padsOffset) { + builder.addFieldOffset(2, padsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Pool3D.createPadsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Pool3D.startPadsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PoolType} type + */ +MNN.Pool3D.addType = function(builder, type) { + builder.addFieldInt8(3, type, MNN.PoolType.MAXPOOL); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PoolPadType} padType + */ +MNN.Pool3D.addPadType = function(builder, padType) { + builder.addFieldInt8(4, padType, MNN.PoolPadType.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Pool3D.endPool3D = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} stridesOffset + * @param {flatbuffers.Offset} kernelsOffset + * @param {flatbuffers.Offset} padsOffset + * @param {MNN.PoolType} type + * @param {MNN.PoolPadType} padType + * @returns {flatbuffers.Offset} + */ +MNN.Pool3D.createPool3D = function(builder, stridesOffset, kernelsOffset, padsOffset, type, padType) { + MNN.Pool3D.startPool3D(builder); + 
MNN.Pool3D.addStrides(builder, stridesOffset); + MNN.Pool3D.addKernels(builder, kernelsOffset); + MNN.Pool3D.addPads(builder, padsOffset); + MNN.Pool3D.addType(builder, type); + MNN.Pool3D.addPadType(builder, padType); + return MNN.Pool3D.endPool3D(builder); +} + +/** + * @constructor + */ +MNN.Relu = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Relu} + */ +MNN.Relu.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Relu=} obj + * @returns {MNN.Relu} + */ +MNN.Relu.getRootAsRelu = function(bb, obj) { + return (obj || new MNN.Relu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Relu=} obj + * @returns {MNN.Relu} + */ +MNN.Relu.getSizePrefixedRootAsRelu = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Relu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Relu.prototype.slope = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Relu.startRelu = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} slope + */ +MNN.Relu.addSlope = function(builder, slope) { + builder.addFieldFloat32(0, slope, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Relu.endRelu = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} slope + * @returns {flatbuffers.Offset} + */ +MNN.Relu.createRelu = function(builder, slope) { + MNN.Relu.startRelu(builder); + MNN.Relu.addSlope(builder, slope); + return MNN.Relu.endRelu(builder); +} + +/** + * @constructor + */ +MNN.Relu6 = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Relu6} + */ +MNN.Relu6.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Relu6=} obj + * @returns {MNN.Relu6} + */ +MNN.Relu6.getRootAsRelu6 = function(bb, obj) { + return (obj || new MNN.Relu6).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Relu6=} obj + * @returns {MNN.Relu6} + */ +MNN.Relu6.getSizePrefixedRootAsRelu6 = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Relu6).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Relu6.prototype.slope = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Relu6.startRelu6 = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} slope + */ +MNN.Relu6.addSlope = function(builder, slope) { + builder.addFieldFloat32(0, slope, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Relu6.endRelu6 = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} slope + * @returns {flatbuffers.Offset} + */ +MNN.Relu6.createRelu6 = function(builder, slope) { + MNN.Relu6.startRelu6(builder); + MNN.Relu6.addSlope(builder, slope); + return MNN.Relu6.endRelu6(builder); +} + +/** + * @constructor + */ +MNN.PRelu = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.PRelu} + */ +MNN.PRelu.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.PRelu=} obj + * @returns {MNN.PRelu} + */ +MNN.PRelu.getRootAsPRelu = function(bb, obj) { + return (obj || new MNN.PRelu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.PRelu=} obj + * @returns {MNN.PRelu} + */ +MNN.PRelu.getSizePrefixedRootAsPRelu = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.PRelu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.PRelu.prototype.slopeCount = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.PRelu.prototype.slope = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.PRelu.prototype.slopeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.PRelu.prototype.slopeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.PRelu.startPRelu = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} slopeCount + */ +MNN.PRelu.addSlopeCount = function(builder, slopeCount) { + builder.addFieldInt32(0, slopeCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} slopeOffset + */ +MNN.PRelu.addSlope = function(builder, slopeOffset) { + builder.addFieldOffset(1, slopeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.PRelu.createSlopeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.PRelu.startSlopeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.PRelu.endPRelu = function(builder) { + var offset = 
builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} slopeCount + * @param {flatbuffers.Offset} slopeOffset + * @returns {flatbuffers.Offset} + */ +MNN.PRelu.createPRelu = function(builder, slopeCount, slopeOffset) { + MNN.PRelu.startPRelu(builder); + MNN.PRelu.addSlopeCount(builder, slopeCount); + MNN.PRelu.addSlope(builder, slopeOffset); + return MNN.PRelu.endPRelu(builder); +} + +/** + * @constructor + */ +MNN.ELU = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.ELU} + */ +MNN.ELU.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ELU=} obj + * @returns {MNN.ELU} + */ +MNN.ELU.getRootAsELU = function(bb, obj) { + return (obj || new MNN.ELU).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ELU=} obj + * @returns {MNN.ELU} + */ +MNN.ELU.getSizePrefixedRootAsELU = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.ELU).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.ELU.prototype.alpha = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.ELU.startELU = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + */ +MNN.ELU.addAlpha = function(builder, alpha) { + builder.addFieldFloat32(0, alpha, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.ELU.endELU = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + * @returns {flatbuffers.Offset} + */ +MNN.ELU.createELU = function(builder, alpha) { + MNN.ELU.startELU(builder); + MNN.ELU.addAlpha(builder, alpha); + return MNN.ELU.endELU(builder); +} + +/** + * @constructor + */ +MNN.LRN = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.LRN} + */ +MNN.LRN.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.LRN=} obj + * @returns {MNN.LRN} + */ +MNN.LRN.getRootAsLRN = function(bb, obj) { + return (obj || new MNN.LRN).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.LRN=} obj + * @returns {MNN.LRN} + */ +MNN.LRN.getSizePrefixedRootAsLRN = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.LRN).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.LRN.prototype.regionType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.LRN.prototype.localSize = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.LRN.prototype.alpha = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.LRN.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.LRN.startLRN = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} regionType + */ +MNN.LRN.addRegionType = function(builder, regionType) { + builder.addFieldInt32(0, regionType, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} localSize + */ +MNN.LRN.addLocalSize = function(builder, localSize) { + builder.addFieldInt32(1, localSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + */ +MNN.LRN.addAlpha = function(builder, alpha) { + builder.addFieldFloat32(2, alpha, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +MNN.LRN.addBeta = function(builder, beta) { + builder.addFieldFloat32(3, beta, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.LRN.endLRN = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} regionType + * @param {number} localSize + * @param {number} alpha + * @param {number} beta + * @returns {flatbuffers.Offset} + */ +MNN.LRN.createLRN = function(builder, regionType, localSize, alpha, beta) { + MNN.LRN.startLRN(builder); + MNN.LRN.addRegionType(builder, regionType); 
+ MNN.LRN.addLocalSize(builder, localSize); + MNN.LRN.addAlpha(builder, alpha); + MNN.LRN.addBeta(builder, beta); + return MNN.LRN.endLRN(builder); +} + +/** + * @constructor + */ +MNN.ArgMax = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.ArgMax} + */ +MNN.ArgMax.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ArgMax=} obj + * @returns {MNN.ArgMax} + */ +MNN.ArgMax.getRootAsArgMax = function(bb, obj) { + return (obj || new MNN.ArgMax).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ArgMax=} obj + * @returns {MNN.ArgMax} + */ +MNN.ArgMax.getSizePrefixedRootAsArgMax = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.ArgMax).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.ArgMax.prototype.outMaxVal = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.ArgMax.prototype.topK = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.ArgMax.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.ArgMax.prototype.softmaxThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.ArgMax.startArgMax = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outMaxVal + */ +MNN.ArgMax.addOutMaxVal = function(builder, outMaxVal) { + builder.addFieldInt32(0, outMaxVal, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} topK + */ +MNN.ArgMax.addTopK = function(builder, topK) { + builder.addFieldInt32(1, topK, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.ArgMax.addAxis = function(builder, axis) { + builder.addFieldInt32(2, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} softmaxThreshold + */ +MNN.ArgMax.addSoftmaxThreshold = function(builder, softmaxThreshold) { + builder.addFieldInt32(3, softmaxThreshold, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.ArgMax.endArgMax = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outMaxVal + * @param {number} topK + * @param {number} axis + * @param {number} softmaxThreshold + * @returns {flatbuffers.Offset} + */ +MNN.ArgMax.createArgMax = function(builder, outMaxVal, topK, axis, softmaxThreshold) { + MNN.ArgMax.startArgMax(builder); + MNN.ArgMax.addOutMaxVal(builder, outMaxVal); + MNN.ArgMax.addTopK(builder, topK); + MNN.ArgMax.addAxis(builder, axis); + MNN.ArgMax.addSoftmaxThreshold(builder, softmaxThreshold); + return MNN.ArgMax.endArgMax(builder); +} + +/** + * @constructor + */ +MNN.Axis = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Axis} + */ +MNN.Axis.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb 
= bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Axis=} obj + * @returns {MNN.Axis} + */ +MNN.Axis.getRootAsAxis = function(bb, obj) { + return (obj || new MNN.Axis).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Axis=} obj + * @returns {MNN.Axis} + */ +MNN.Axis.getSizePrefixedRootAsAxis = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Axis).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Axis.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Axis.startAxis = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.Axis.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Axis.endAxis = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +MNN.Axis.createAxis = function(builder, axis) { + MNN.Axis.startAxis(builder); + MNN.Axis.addAxis(builder, axis); + return MNN.Axis.endAxis(builder); +} + +/** + * @constructor + */ +MNN.Input = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Input} + */ +MNN.Input.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Input=} obj + * @returns {MNN.Input} + */ 
+MNN.Input.getRootAsInput = function(bb, obj) { + return (obj || new MNN.Input).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Input=} obj + * @returns {MNN.Input} + */ +MNN.Input.getSizePrefixedRootAsInput = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Input).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Input.prototype.dims = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Input.prototype.dimsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Input.prototype.dimsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Input.prototype.dtype = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @returns {MNN.MNN_DATA_FORMAT} + */ +MNN.Input.prototype.dformat = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
/** @type {MNN.MNN_DATA_FORMAT} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.MNN_DATA_FORMAT.NC4HW4; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Input.startInput = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + */ +MNN.Input.addDims = function(builder, dimsOffset) { + builder.addFieldOffset(0, dimsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Input.createDimsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Input.startDimsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dtype + */ +MNN.Input.addDtype = function(builder, dtype) { + builder.addFieldInt32(1, dtype, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.MNN_DATA_FORMAT} dformat + */ +MNN.Input.addDformat = function(builder, dformat) { + builder.addFieldInt8(2, dformat, MNN.MNN_DATA_FORMAT.NC4HW4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Input.endInput = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + * @param {MNN.DataType} dtype + * @param {MNN.MNN_DATA_FORMAT} dformat + * @returns {flatbuffers.Offset} + */ +MNN.Input.createInput = function(builder, dimsOffset, dtype, dformat) { + MNN.Input.startInput(builder); + MNN.Input.addDims(builder, dimsOffset); + MNN.Input.addDtype(builder, dtype); + MNN.Input.addDformat(builder, dformat); + return 
MNN.Input.endInput(builder); +} + +/** + * @constructor + */ +MNN.LSTM = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.LSTM} + */ +MNN.LSTM.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.LSTM=} obj + * @returns {MNN.LSTM} + */ +MNN.LSTM.getRootAsLSTM = function(bb, obj) { + return (obj || new MNN.LSTM).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.LSTM=} obj + * @returns {MNN.LSTM} + */ +MNN.LSTM.getSizePrefixedRootAsLSTM = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.LSTM).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.LSTM.prototype.outputCount = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.LSTM.prototype.weightSize = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.LSTM.prototype.clippingThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.LSTM.prototype.weightI = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.LSTM.prototype.weightH = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.LSTM.prototype.bias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.LSTM.prototype.weightIQ = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.LSTM.prototype.weightIA = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.LSTM.prototype.quantScale = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.LSTM.startLSTM = function(builder) { + builder.startObject(9); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputCount + */ +MNN.LSTM.addOutputCount = function(builder, outputCount) { + builder.addFieldInt32(0, outputCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} weightSize + */ +MNN.LSTM.addWeightSize = function(builder, weightSize) { + builder.addFieldInt32(1, weightSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} clippingThreshold + */ +MNN.LSTM.addClippingThreshold = function(builder, clippingThreshold) { + builder.addFieldFloat32(2, clippingThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightIOffset + */ +MNN.LSTM.addWeightI = function(builder, weightIOffset) { + builder.addFieldOffset(3, weightIOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightHOffset + */ +MNN.LSTM.addWeightH = function(builder, weightHOffset) { + builder.addFieldOffset(4, weightHOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.LSTM.addBias = function(builder, biasOffset) { + builder.addFieldOffset(5, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightIQOffset + */ +MNN.LSTM.addWeightIQ = function(builder, weightIQOffset) { + builder.addFieldOffset(6, weightIQOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightIAOffset + */ +MNN.LSTM.addWeightIA = function(builder, weightIAOffset) { + builder.addFieldOffset(7, weightIAOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} quantScale + */ +MNN.LSTM.addQuantScale = function(builder, quantScale) { + builder.addFieldFloat32(8, quantScale, 
0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.LSTM.endLSTM = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputCount + * @param {number} weightSize + * @param {number} clippingThreshold + * @param {flatbuffers.Offset} weightIOffset + * @param {flatbuffers.Offset} weightHOffset + * @param {flatbuffers.Offset} biasOffset + * @param {flatbuffers.Offset} weightIQOffset + * @param {flatbuffers.Offset} weightIAOffset + * @param {number} quantScale + * @returns {flatbuffers.Offset} + */ +MNN.LSTM.createLSTM = function(builder, outputCount, weightSize, clippingThreshold, weightIOffset, weightHOffset, biasOffset, weightIQOffset, weightIAOffset, quantScale) { + MNN.LSTM.startLSTM(builder); + MNN.LSTM.addOutputCount(builder, outputCount); + MNN.LSTM.addWeightSize(builder, weightSize); + MNN.LSTM.addClippingThreshold(builder, clippingThreshold); + MNN.LSTM.addWeightI(builder, weightIOffset); + MNN.LSTM.addWeightH(builder, weightHOffset); + MNN.LSTM.addBias(builder, biasOffset); + MNN.LSTM.addWeightIQ(builder, weightIQOffset); + MNN.LSTM.addWeightIA(builder, weightIAOffset); + MNN.LSTM.addQuantScale(builder, quantScale); + return MNN.LSTM.endLSTM(builder); +} + +/** + * @constructor + */ +MNN.Slice = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Slice} + */ +MNN.Slice.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Slice=} obj + * @returns {MNN.Slice} + */ +MNN.Slice.getRootAsSlice = function(bb, obj) { + return (obj || new MNN.Slice).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{MNN.Slice=} obj + * @returns {MNN.Slice} + */ +MNN.Slice.getSizePrefixedRootAsSlice = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Slice).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Slice.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Slice.prototype.slicePoints = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Slice.prototype.slicePointsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Slice.prototype.slicePointsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.NetSource} + */ +MNN.Slice.prototype.sourceType = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
/** @type {MNN.NetSource} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.NetSource.CAFFE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Slice.startSlice = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.Slice.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} slicePointsOffset + */ +MNN.Slice.addSlicePoints = function(builder, slicePointsOffset) { + builder.addFieldOffset(1, slicePointsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Slice.createSlicePointsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Slice.startSlicePointsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.NetSource} sourceType + */ +MNN.Slice.addSourceType = function(builder, sourceType) { + builder.addFieldInt8(2, sourceType, MNN.NetSource.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Slice.endSlice = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + * @param {flatbuffers.Offset} slicePointsOffset + * @param {MNN.NetSource} sourceType + * @returns {flatbuffers.Offset} + */ +MNN.Slice.createSlice = function(builder, axis, slicePointsOffset, sourceType) { + MNN.Slice.startSlice(builder); + MNN.Slice.addAxis(builder, axis); + MNN.Slice.addSlicePoints(builder, slicePointsOffset); + MNN.Slice.addSourceType(builder, sourceType); + 
return MNN.Slice.endSlice(builder); +} + +/** + * @constructor + */ +MNN.BatchNorm = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.BatchNorm} + */ +MNN.BatchNorm.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.BatchNorm=} obj + * @returns {MNN.BatchNorm} + */ +MNN.BatchNorm.getRootAsBatchNorm = function(bb, obj) { + return (obj || new MNN.BatchNorm).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.BatchNorm=} obj + * @returns {MNN.BatchNorm} + */ +MNN.BatchNorm.getSizePrefixedRootAsBatchNorm = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.BatchNorm).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.channels = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.BatchNorm.prototype.slopeData = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.slopeDataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.BatchNorm.prototype.slopeDataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.BatchNorm.prototype.meanData = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.meanDataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.BatchNorm.prototype.meanDataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.BatchNorm.prototype.varData = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.varDataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.BatchNorm.prototype.varDataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.BatchNorm.prototype.biasData = function(index) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.biasDataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.BatchNorm.prototype.biasDataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.BatchNorm.prototype.Adata = function(index) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.AdataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.BatchNorm.prototype.AdataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.BatchNorm.prototype.Bdata = function(index) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.BdataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.BatchNorm.prototype.BdataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {number} + */ +MNN.BatchNorm.prototype.epsilon = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.001; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.BatchNorm.startBatchNorm = function(builder) { + builder.startObject(8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} channels + */ +MNN.BatchNorm.addChannels = function(builder, channels) { + builder.addFieldInt32(0, channels, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} slopeDataOffset + */ +MNN.BatchNorm.addSlopeData = function(builder, slopeDataOffset) { + builder.addFieldOffset(1, slopeDataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.BatchNorm.createSlopeDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.BatchNorm.startSlopeDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} meanDataOffset + */ +MNN.BatchNorm.addMeanData = function(builder, meanDataOffset) { + builder.addFieldOffset(2, meanDataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ 
+MNN.BatchNorm.createMeanDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.BatchNorm.startMeanDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} varDataOffset + */ +MNN.BatchNorm.addVarData = function(builder, varDataOffset) { + builder.addFieldOffset(3, varDataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.BatchNorm.createVarDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.BatchNorm.startVarDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasDataOffset + */ +MNN.BatchNorm.addBiasData = function(builder, biasDataOffset) { + builder.addFieldOffset(4, biasDataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.BatchNorm.createBiasDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.BatchNorm.startBiasDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} AdataOffset + */ 
+MNN.BatchNorm.addAdata = function(builder, AdataOffset) { + builder.addFieldOffset(5, AdataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.BatchNorm.createAdataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.BatchNorm.startAdataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} BdataOffset + */ +MNN.BatchNorm.addBdata = function(builder, BdataOffset) { + builder.addFieldOffset(6, BdataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.BatchNorm.createBdataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.BatchNorm.startBdataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} epsilon + */ +MNN.BatchNorm.addEpsilon = function(builder, epsilon) { + builder.addFieldFloat32(7, epsilon, 0.001); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.BatchNorm.endBatchNorm = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} channels + * @param {flatbuffers.Offset} slopeDataOffset + * @param {flatbuffers.Offset} meanDataOffset + * @param {flatbuffers.Offset} varDataOffset + * @param {flatbuffers.Offset} biasDataOffset 
+ * @param {flatbuffers.Offset} AdataOffset + * @param {flatbuffers.Offset} BdataOffset + * @param {number} epsilon + * @returns {flatbuffers.Offset} + */ +MNN.BatchNorm.createBatchNorm = function(builder, channels, slopeDataOffset, meanDataOffset, varDataOffset, biasDataOffset, AdataOffset, BdataOffset, epsilon) { + MNN.BatchNorm.startBatchNorm(builder); + MNN.BatchNorm.addChannels(builder, channels); + MNN.BatchNorm.addSlopeData(builder, slopeDataOffset); + MNN.BatchNorm.addMeanData(builder, meanDataOffset); + MNN.BatchNorm.addVarData(builder, varDataOffset); + MNN.BatchNorm.addBiasData(builder, biasDataOffset); + MNN.BatchNorm.addAdata(builder, AdataOffset); + MNN.BatchNorm.addBdata(builder, BdataOffset); + MNN.BatchNorm.addEpsilon(builder, epsilon); + return MNN.BatchNorm.endBatchNorm(builder); +} + +/** + * @constructor + */ +MNN.Scale = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Scale} + */ +MNN.Scale.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Scale=} obj + * @returns {MNN.Scale} + */ +MNN.Scale.getRootAsScale = function(bb, obj) { + return (obj || new MNN.Scale).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Scale=} obj + * @returns {MNN.Scale} + */ +MNN.Scale.getSizePrefixedRootAsScale = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Scale).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Scale.prototype.channels = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Scale.prototype.scaleData = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Scale.prototype.scaleDataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Scale.prototype.scaleDataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Scale.prototype.biasData = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Scale.prototype.biasDataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Scale.prototype.biasDataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Scale.startScale = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} channels + */ +MNN.Scale.addChannels = function(builder, channels) { + builder.addFieldInt32(0, channels, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} scaleDataOffset + */ +MNN.Scale.addScaleData = function(builder, scaleDataOffset) { + builder.addFieldOffset(1, scaleDataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Scale.createScaleDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Scale.startScaleDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasDataOffset + */ +MNN.Scale.addBiasData = function(builder, biasDataOffset) { + builder.addFieldOffset(2, biasDataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Scale.createBiasDataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Scale.startBiasDataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * 
@returns {flatbuffers.Offset} + */ +MNN.Scale.endScale = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} channels + * @param {flatbuffers.Offset} scaleDataOffset + * @param {flatbuffers.Offset} biasDataOffset + * @returns {flatbuffers.Offset} + */ +MNN.Scale.createScale = function(builder, channels, scaleDataOffset, biasDataOffset) { + MNN.Scale.startScale(builder); + MNN.Scale.addChannels(builder, channels); + MNN.Scale.addScaleData(builder, scaleDataOffset); + MNN.Scale.addBiasData(builder, biasDataOffset); + return MNN.Scale.endScale(builder); +} + +/** + * @constructor + */ +MNN.Eltwise = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Eltwise} + */ +MNN.Eltwise.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Eltwise=} obj + * @returns {MNN.Eltwise} + */ +MNN.Eltwise.getRootAsEltwise = function(bb, obj) { + return (obj || new MNN.Eltwise).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Eltwise=} obj + * @returns {MNN.Eltwise} + */ +MNN.Eltwise.getSizePrefixedRootAsEltwise = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Eltwise).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.EltwiseType} + */ +MNN.Eltwise.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {MNN.EltwiseType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.EltwiseType.PROD; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Eltwise.prototype.coeff = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Eltwise.prototype.coeffLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Eltwise.prototype.coeffArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Eltwise.startEltwise = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.EltwiseType} type + */ +MNN.Eltwise.addType = function(builder, type) { + builder.addFieldInt8(0, type, MNN.EltwiseType.PROD); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} coeffOffset + */ +MNN.Eltwise.addCoeff = function(builder, coeffOffset) { + builder.addFieldOffset(1, coeffOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Eltwise.createCoeffVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Eltwise.startCoeffVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns 
{flatbuffers.Offset} + */ +MNN.Eltwise.endEltwise = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.EltwiseType} type + * @param {flatbuffers.Offset} coeffOffset + * @returns {flatbuffers.Offset} + */ +MNN.Eltwise.createEltwise = function(builder, type, coeffOffset) { + MNN.Eltwise.startEltwise(builder); + MNN.Eltwise.addType(builder, type); + MNN.Eltwise.addCoeff(builder, coeffOffset); + return MNN.Eltwise.endEltwise(builder); +} + +/** + * @constructor + */ +MNN.Flatten = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Flatten} + */ +MNN.Flatten.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Flatten=} obj + * @returns {MNN.Flatten} + */ +MNN.Flatten.getRootAsFlatten = function(bb, obj) { + return (obj || new MNN.Flatten).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Flatten=} obj + * @returns {MNN.Flatten} + */ +MNN.Flatten.getSizePrefixedRootAsFlatten = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Flatten).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Flatten.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Flatten.prototype.endAxis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Flatten.startFlatten = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.Flatten.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} endAxis + */ +MNN.Flatten.addEndAxis = function(builder, endAxis) { + builder.addFieldInt32(1, endAxis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Flatten.endFlatten = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + * @param {number} endAxis + * @returns {flatbuffers.Offset} + */ +MNN.Flatten.createFlatten = function(builder, axis, endAxis) { + MNN.Flatten.startFlatten(builder); + MNN.Flatten.addAxis(builder, axis); + MNN.Flatten.addEndAxis(builder, endAxis); + return MNN.Flatten.endFlatten(builder); +} + +/** + * @constructor + */ +MNN.Permute = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Permute} + */ +MNN.Permute.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Permute=} obj + * @returns {MNN.Permute} + */ +MNN.Permute.getRootAsPermute = function(bb, obj) { + return (obj || new MNN.Permute).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Permute=} obj + * @returns {MNN.Permute} + */ +MNN.Permute.getSizePrefixedRootAsPermute = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new 
MNN.Permute).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Permute.prototype.dims = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Permute.prototype.dimsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Permute.prototype.dimsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Permute.startPermute = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + */ +MNN.Permute.addDims = function(builder, dimsOffset) { + builder.addFieldOffset(0, dimsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Permute.createDimsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Permute.startDimsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Permute.endPermute = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + * @returns 
{flatbuffers.Offset} + */ +MNN.Permute.createPermute = function(builder, dimsOffset) { + MNN.Permute.startPermute(builder); + MNN.Permute.addDims(builder, dimsOffset); + return MNN.Permute.endPermute(builder); +} + +/** + * @constructor + */ +MNN.Reshape = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Reshape} + */ +MNN.Reshape.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Reshape=} obj + * @returns {MNN.Reshape} + */ +MNN.Reshape.getRootAsReshape = function(bb, obj) { + return (obj || new MNN.Reshape).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Reshape=} obj + * @returns {MNN.Reshape} + */ +MNN.Reshape.getSizePrefixedRootAsReshape = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Reshape).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Reshape.prototype.dims = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Reshape.prototype.dimsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Reshape.prototype.dimsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.MNN_DATA_FORMAT} + */ +MNN.Reshape.prototype.dimType = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.MNN_DATA_FORMAT} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.MNN_DATA_FORMAT.NCHW; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Reshape.startReshape = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + */ +MNN.Reshape.addDims = function(builder, dimsOffset) { + builder.addFieldOffset(0, dimsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Reshape.createDimsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Reshape.startDimsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.MNN_DATA_FORMAT} dimType + */ +MNN.Reshape.addDimType = function(builder, dimType) { + builder.addFieldInt8(1, dimType, MNN.MNN_DATA_FORMAT.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Reshape.endReshape = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + * @param {MNN.MNN_DATA_FORMAT} dimType + * @returns {flatbuffers.Offset} + */ +MNN.Reshape.createReshape = function(builder, dimsOffset, dimType) { + MNN.Reshape.startReshape(builder); + MNN.Reshape.addDims(builder, 
dimsOffset); + MNN.Reshape.addDimType(builder, dimType); + return MNN.Reshape.endReshape(builder); +} + +/** + * @constructor + */ +MNN.DetectionOutput = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.DetectionOutput} + */ +MNN.DetectionOutput.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.DetectionOutput=} obj + * @returns {MNN.DetectionOutput} + */ +MNN.DetectionOutput.getRootAsDetectionOutput = function(bb, obj) { + return (obj || new MNN.DetectionOutput).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.DetectionOutput=} obj + * @returns {MNN.DetectionOutput} + */ +MNN.DetectionOutput.getSizePrefixedRootAsDetectionOutput = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.DetectionOutput).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.classCount = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.nmsThresholdold = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.nmsTopK = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.keepTopK = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.confidenceThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.shareLocation = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.backgroundLable = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.varianceEncodedTarget = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.codeType = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionOutput.prototype.objectnessScore = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.01; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.DetectionOutput.startDetectionOutput = function(builder) { + builder.startObject(10); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} classCount + */ +MNN.DetectionOutput.addClassCount = function(builder, classCount) { + builder.addFieldInt32(0, classCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} nmsThresholdold + */ +MNN.DetectionOutput.addNmsThresholdold = function(builder, nmsThresholdold) { + builder.addFieldFloat32(1, nmsThresholdold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} nmsTopK + */ +MNN.DetectionOutput.addNmsTopK = function(builder, nmsTopK) { + builder.addFieldInt32(2, nmsTopK, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} keepTopK + */ +MNN.DetectionOutput.addKeepTopK = function(builder, keepTopK) { + builder.addFieldInt32(3, keepTopK, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} confidenceThreshold + */ +MNN.DetectionOutput.addConfidenceThreshold = function(builder, confidenceThreshold) { + builder.addFieldFloat32(4, confidenceThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} shareLocation + */ +MNN.DetectionOutput.addShareLocation = function(builder, shareLocation) { + builder.addFieldInt32(5, shareLocation, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} backgroundLable + */ +MNN.DetectionOutput.addBackgroundLable = function(builder, backgroundLable) { + builder.addFieldInt32(6, backgroundLable, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} varianceEncodedTarget + */ +MNN.DetectionOutput.addVarianceEncodedTarget = function(builder, varianceEncodedTarget) { + builder.addFieldInt32(7, varianceEncodedTarget, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} codeType 
+ */ +MNN.DetectionOutput.addCodeType = function(builder, codeType) { + builder.addFieldInt32(8, codeType, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} objectnessScore + */ +MNN.DetectionOutput.addObjectnessScore = function(builder, objectnessScore) { + builder.addFieldFloat32(9, objectnessScore, 0.01); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.DetectionOutput.endDetectionOutput = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} classCount + * @param {number} nmsThresholdold + * @param {number} nmsTopK + * @param {number} keepTopK + * @param {number} confidenceThreshold + * @param {number} shareLocation + * @param {number} backgroundLable + * @param {number} varianceEncodedTarget + * @param {number} codeType + * @param {number} objectnessScore + * @returns {flatbuffers.Offset} + */ +MNN.DetectionOutput.createDetectionOutput = function(builder, classCount, nmsThresholdold, nmsTopK, keepTopK, confidenceThreshold, shareLocation, backgroundLable, varianceEncodedTarget, codeType, objectnessScore) { + MNN.DetectionOutput.startDetectionOutput(builder); + MNN.DetectionOutput.addClassCount(builder, classCount); + MNN.DetectionOutput.addNmsThresholdold(builder, nmsThresholdold); + MNN.DetectionOutput.addNmsTopK(builder, nmsTopK); + MNN.DetectionOutput.addKeepTopK(builder, keepTopK); + MNN.DetectionOutput.addConfidenceThreshold(builder, confidenceThreshold); + MNN.DetectionOutput.addShareLocation(builder, shareLocation); + MNN.DetectionOutput.addBackgroundLable(builder, backgroundLable); + MNN.DetectionOutput.addVarianceEncodedTarget(builder, varianceEncodedTarget); + MNN.DetectionOutput.addCodeType(builder, codeType); + MNN.DetectionOutput.addObjectnessScore(builder, objectnessScore); + return MNN.DetectionOutput.endDetectionOutput(builder); +} + +/** + * @constructor + */ +MNN.RoiPooling = 
function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.RoiPooling} + */ +MNN.RoiPooling.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.RoiPooling=} obj + * @returns {MNN.RoiPooling} + */ +MNN.RoiPooling.getRootAsRoiPooling = function(bb, obj) { + return (obj || new MNN.RoiPooling).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.RoiPooling=} obj + * @returns {MNN.RoiPooling} + */ +MNN.RoiPooling.getSizePrefixedRootAsRoiPooling = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.RoiPooling).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.RoiPooling.prototype.pooledWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.RoiPooling.prototype.pooledHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.RoiPooling.prototype.spatialScale = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.RoiPooling.startRoiPooling = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} pooledWidth + */ +MNN.RoiPooling.addPooledWidth = function(builder, pooledWidth) { + builder.addFieldInt32(0, pooledWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} pooledHeight + */ +MNN.RoiPooling.addPooledHeight = function(builder, pooledHeight) { + builder.addFieldInt32(1, pooledHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} spatialScale + */ +MNN.RoiPooling.addSpatialScale = function(builder, spatialScale) { + builder.addFieldFloat32(2, spatialScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.RoiPooling.endRoiPooling = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} pooledWidth + * @param {number} pooledHeight + * @param {number} spatialScale + * @returns {flatbuffers.Offset} + */ +MNN.RoiPooling.createRoiPooling = function(builder, pooledWidth, pooledHeight, spatialScale) { + MNN.RoiPooling.startRoiPooling(builder); + MNN.RoiPooling.addPooledWidth(builder, pooledWidth); + MNN.RoiPooling.addPooledHeight(builder, pooledHeight); + MNN.RoiPooling.addSpatialScale(builder, spatialScale); + return MNN.RoiPooling.endRoiPooling(builder); +} + +/** + * @constructor + */ +MNN.Proposal = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Proposal} + */ +MNN.Proposal.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Proposal=} obj 
+ * @returns {MNN.Proposal} + */ +MNN.Proposal.getRootAsProposal = function(bb, obj) { + return (obj || new MNN.Proposal).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Proposal=} obj + * @returns {MNN.Proposal} + */ +MNN.Proposal.getSizePrefixedRootAsProposal = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Proposal).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Proposal.prototype.featStride = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Proposal.prototype.baseSize = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Proposal.prototype.preNmsTopN = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Proposal.prototype.afterNmsTopN = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Proposal.prototype.nmsThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.Proposal.prototype.minSize = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.Proposal.prototype.ratios = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
(obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.Proposal.prototype.scales = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.Proposal.prototype.anchors = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Proposal.startProposal = function(builder) { + builder.startObject(9); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} featStride + */ +MNN.Proposal.addFeatStride = function(builder, featStride) { + builder.addFieldInt32(0, featStride, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} baseSize + */ +MNN.Proposal.addBaseSize = function(builder, baseSize) { + builder.addFieldInt32(1, baseSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} preNmsTopN + */ +MNN.Proposal.addPreNmsTopN = function(builder, preNmsTopN) { + builder.addFieldInt32(2, preNmsTopN, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} afterNmsTopN + */ +MNN.Proposal.addAfterNmsTopN = function(builder, afterNmsTopN) { + builder.addFieldInt32(3, afterNmsTopN, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} nmsThreshold + */ +MNN.Proposal.addNmsThreshold = function(builder, nmsThreshold) { + builder.addFieldFloat32(4, nmsThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} minSize + */ +MNN.Proposal.addMinSize = function(builder, minSize) { + builder.addFieldInt32(5, minSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param 
{flatbuffers.Offset} ratiosOffset + */ +MNN.Proposal.addRatios = function(builder, ratiosOffset) { + builder.addFieldOffset(6, ratiosOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} scalesOffset + */ +MNN.Proposal.addScales = function(builder, scalesOffset) { + builder.addFieldOffset(7, scalesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} anchorsOffset + */ +MNN.Proposal.addAnchors = function(builder, anchorsOffset) { + builder.addFieldOffset(8, anchorsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Proposal.endProposal = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} featStride + * @param {number} baseSize + * @param {number} preNmsTopN + * @param {number} afterNmsTopN + * @param {number} nmsThreshold + * @param {number} minSize + * @param {flatbuffers.Offset} ratiosOffset + * @param {flatbuffers.Offset} scalesOffset + * @param {flatbuffers.Offset} anchorsOffset + * @returns {flatbuffers.Offset} + */ +MNN.Proposal.createProposal = function(builder, featStride, baseSize, preNmsTopN, afterNmsTopN, nmsThreshold, minSize, ratiosOffset, scalesOffset, anchorsOffset) { + MNN.Proposal.startProposal(builder); + MNN.Proposal.addFeatStride(builder, featStride); + MNN.Proposal.addBaseSize(builder, baseSize); + MNN.Proposal.addPreNmsTopN(builder, preNmsTopN); + MNN.Proposal.addAfterNmsTopN(builder, afterNmsTopN); + MNN.Proposal.addNmsThreshold(builder, nmsThreshold); + MNN.Proposal.addMinSize(builder, minSize); + MNN.Proposal.addRatios(builder, ratiosOffset); + MNN.Proposal.addScales(builder, scalesOffset); + MNN.Proposal.addAnchors(builder, anchorsOffset); + return MNN.Proposal.endProposal(builder); +} + +/** + * @constructor + */ +MNN.Interp = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + 
/** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Interp} + */ +MNN.Interp.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Interp=} obj + * @returns {MNN.Interp} + */ +MNN.Interp.getRootAsInterp = function(bb, obj) { + return (obj || new MNN.Interp).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Interp=} obj + * @returns {MNN.Interp} + */ +MNN.Interp.getSizePrefixedRootAsInterp = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Interp).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Interp.prototype.widthScale = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.Interp.prototype.heightScale = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.Interp.prototype.outputWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Interp.prototype.outputHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Interp.prototype.resizeType = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.Interp.prototype.alignCorners = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.Interp.prototype.halfPixelCenters = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Interp.startInterp = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} widthScale + */ +MNN.Interp.addWidthScale = function(builder, widthScale) { + builder.addFieldFloat32(0, widthScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} heightScale + */ +MNN.Interp.addHeightScale = function(builder, heightScale) { + builder.addFieldFloat32(1, heightScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputWidth + */ +MNN.Interp.addOutputWidth = function(builder, outputWidth) { + builder.addFieldInt32(2, outputWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputHeight + */ +MNN.Interp.addOutputHeight = function(builder, outputHeight) { + builder.addFieldInt32(3, outputHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} resizeType + */ +MNN.Interp.addResizeType = function(builder, resizeType) { + builder.addFieldInt32(4, resizeType, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} alignCorners + */ +MNN.Interp.addAlignCorners = function(builder, alignCorners) { + builder.addFieldInt8(5, +alignCorners, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} halfPixelCenters + */ +MNN.Interp.addHalfPixelCenters = function(builder, halfPixelCenters) { + builder.addFieldInt8(6, +halfPixelCenters, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Interp.endInterp = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * 
@param {flatbuffers.Builder} builder + * @param {number} widthScale + * @param {number} heightScale + * @param {number} outputWidth + * @param {number} outputHeight + * @param {number} resizeType + * @param {boolean} alignCorners + * @param {boolean} halfPixelCenters + * @returns {flatbuffers.Offset} + */ +MNN.Interp.createInterp = function(builder, widthScale, heightScale, outputWidth, outputHeight, resizeType, alignCorners, halfPixelCenters) { + MNN.Interp.startInterp(builder); + MNN.Interp.addWidthScale(builder, widthScale); + MNN.Interp.addHeightScale(builder, heightScale); + MNN.Interp.addOutputWidth(builder, outputWidth); + MNN.Interp.addOutputHeight(builder, outputHeight); + MNN.Interp.addResizeType(builder, resizeType); + MNN.Interp.addAlignCorners(builder, alignCorners); + MNN.Interp.addHalfPixelCenters(builder, halfPixelCenters); + return MNN.Interp.endInterp(builder); +} + +/** + * @constructor + */ +MNN.Resize = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Resize} + */ +MNN.Resize.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Resize=} obj + * @returns {MNN.Resize} + */ +MNN.Resize.getRootAsResize = function(bb, obj) { + return (obj || new MNN.Resize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Resize=} obj + * @returns {MNN.Resize} + */ +MNN.Resize.getSizePrefixedRootAsResize = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Resize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Resize.prototype.xScale = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.Resize.prototype.yScale = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Resize.startResize = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} xScale + */ +MNN.Resize.addXScale = function(builder, xScale) { + builder.addFieldFloat32(0, xScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} yScale + */ +MNN.Resize.addYScale = function(builder, yScale) { + builder.addFieldFloat32(1, yScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Resize.endResize = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} xScale + * @param {number} yScale + * @returns {flatbuffers.Offset} + */ +MNN.Resize.createResize = function(builder, xScale, yScale) { + MNN.Resize.startResize(builder); + MNN.Resize.addXScale(builder, xScale); + MNN.Resize.addYScale(builder, yScale); + return MNN.Resize.endResize(builder); +} + +/** + * @constructor + */ +MNN.PriorBox = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.PriorBox} + */ +MNN.PriorBox.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.PriorBox=} obj + * @returns {MNN.PriorBox} + */ +MNN.PriorBox.getRootAsPriorBox = function(bb, obj) { + return (obj || new MNN.PriorBox).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{MNN.PriorBox=} obj + * @returns {MNN.PriorBox} + */ +MNN.PriorBox.getSizePrefixedRootAsPriorBox = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.PriorBox).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.PriorBox.prototype.minSizes = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.minSizesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.PriorBox.prototype.minSizesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.PriorBox.prototype.maxSizes = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.maxSizesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.PriorBox.prototype.maxSizesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.PriorBox.prototype.aspectRatios = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.aspectRatiosLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.PriorBox.prototype.aspectRatiosArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.PriorBox.prototype.variances = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.variancesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.PriorBox.prototype.variancesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {boolean} + */ +MNN.PriorBox.prototype.flip = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.PriorBox.prototype.clip = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.imageWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.imageHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.stepWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.stepHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.PriorBox.prototype.offset = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.PriorBox.startPriorBox = function(builder) { + builder.startObject(11); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} minSizesOffset + */ +MNN.PriorBox.addMinSizes = function(builder, minSizesOffset) { + builder.addFieldOffset(0, minSizesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.PriorBox.createMinSizesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.PriorBox.startMinSizesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} maxSizesOffset + */ +MNN.PriorBox.addMaxSizes = function(builder, maxSizesOffset) { + builder.addFieldOffset(1, maxSizesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.PriorBox.createMaxSizesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.PriorBox.startMaxSizesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} aspectRatiosOffset + */ +MNN.PriorBox.addAspectRatios = function(builder, aspectRatiosOffset) { + builder.addFieldOffset(2, aspectRatiosOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns 
{flatbuffers.Offset} + */ +MNN.PriorBox.createAspectRatiosVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.PriorBox.startAspectRatiosVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} variancesOffset + */ +MNN.PriorBox.addVariances = function(builder, variancesOffset) { + builder.addFieldOffset(3, variancesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.PriorBox.createVariancesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.PriorBox.startVariancesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} flip + */ +MNN.PriorBox.addFlip = function(builder, flip) { + builder.addFieldInt8(4, +flip, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} clip + */ +MNN.PriorBox.addClip = function(builder, clip) { + builder.addFieldInt8(5, +clip, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} imageWidth + */ +MNN.PriorBox.addImageWidth = function(builder, imageWidth) { + builder.addFieldInt32(6, imageWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} imageHeight + */ +MNN.PriorBox.addImageHeight = function(builder, imageHeight) { + builder.addFieldInt32(7, imageHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} 
stepWidth + */ +MNN.PriorBox.addStepWidth = function(builder, stepWidth) { + builder.addFieldInt32(8, stepWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} stepHeight + */ +MNN.PriorBox.addStepHeight = function(builder, stepHeight) { + builder.addFieldInt32(9, stepHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} offset + */ +MNN.PriorBox.addOffset = function(builder, offset) { + builder.addFieldFloat32(10, offset, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.PriorBox.endPriorBox = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} minSizesOffset + * @param {flatbuffers.Offset} maxSizesOffset + * @param {flatbuffers.Offset} aspectRatiosOffset + * @param {flatbuffers.Offset} variancesOffset + * @param {boolean} flip + * @param {boolean} clip + * @param {number} imageWidth + * @param {number} imageHeight + * @param {number} stepWidth + * @param {number} stepHeight + * @param {number} offset + * @returns {flatbuffers.Offset} + */ +MNN.PriorBox.createPriorBox = function(builder, minSizesOffset, maxSizesOffset, aspectRatiosOffset, variancesOffset, flip, clip, imageWidth, imageHeight, stepWidth, stepHeight, offset) { + MNN.PriorBox.startPriorBox(builder); + MNN.PriorBox.addMinSizes(builder, minSizesOffset); + MNN.PriorBox.addMaxSizes(builder, maxSizesOffset); + MNN.PriorBox.addAspectRatios(builder, aspectRatiosOffset); + MNN.PriorBox.addVariances(builder, variancesOffset); + MNN.PriorBox.addFlip(builder, flip); + MNN.PriorBox.addClip(builder, clip); + MNN.PriorBox.addImageWidth(builder, imageWidth); + MNN.PriorBox.addImageHeight(builder, imageHeight); + MNN.PriorBox.addStepWidth(builder, stepWidth); + MNN.PriorBox.addStepHeight(builder, stepHeight); + MNN.PriorBox.addOffset(builder, offset); + return MNN.PriorBox.endPriorBox(builder); +} + +/** 
+ * @constructor + */ +MNN.Normalize = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Normalize} + */ +MNN.Normalize.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Normalize=} obj + * @returns {MNN.Normalize} + */ +MNN.Normalize.getRootAsNormalize = function(bb, obj) { + return (obj || new MNN.Normalize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Normalize=} obj + * @returns {MNN.Normalize} + */ +MNN.Normalize.getSizePrefixedRootAsNormalize = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Normalize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Normalize.prototype.acrossSpatial = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Normalize.prototype.channelShared = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Normalize.prototype.eps = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Normalize.prototype.scale = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Normalize.prototype.scaleLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.Normalize.prototype.scaleArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Normalize.startNormalize = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} acrossSpatial + */ +MNN.Normalize.addAcrossSpatial = function(builder, acrossSpatial) { + builder.addFieldInt32(0, acrossSpatial, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} channelShared + */ +MNN.Normalize.addChannelShared = function(builder, channelShared) { + builder.addFieldInt32(1, channelShared, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} eps + */ +MNN.Normalize.addEps = function(builder, eps) { + builder.addFieldFloat32(2, eps, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} scaleOffset + */ +MNN.Normalize.addScale = function(builder, scaleOffset) { + builder.addFieldOffset(3, scaleOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Normalize.createScaleVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Normalize.startScaleVector = function(builder, 
numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Normalize.endNormalize = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} acrossSpatial + * @param {number} channelShared + * @param {number} eps + * @param {flatbuffers.Offset} scaleOffset + * @returns {flatbuffers.Offset} + */ +MNN.Normalize.createNormalize = function(builder, acrossSpatial, channelShared, eps, scaleOffset) { + MNN.Normalize.startNormalize(builder); + MNN.Normalize.addAcrossSpatial(builder, acrossSpatial); + MNN.Normalize.addChannelShared(builder, channelShared); + MNN.Normalize.addEps(builder, eps); + MNN.Normalize.addScale(builder, scaleOffset); + return MNN.Normalize.endNormalize(builder); +} + +/** + * @constructor + */ +MNN.EltwiseInt8 = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.EltwiseInt8} + */ +MNN.EltwiseInt8.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.EltwiseInt8=} obj + * @returns {MNN.EltwiseInt8} + */ +MNN.EltwiseInt8.getRootAsEltwiseInt8 = function(bb, obj) { + return (obj || new MNN.EltwiseInt8).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.EltwiseInt8=} obj + * @returns {MNN.EltwiseInt8} + */ +MNN.EltwiseInt8.getSizePrefixedRootAsEltwiseInt8 = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.EltwiseInt8).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.EltwiseType} + */ +MNN.EltwiseInt8.prototype.type = function() { + var offset = 
this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.EltwiseType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.EltwiseType.PROD; +}; + +/** + * @param {MNN.QuantizedFloatParam=} obj + * @returns {MNN.QuantizedFloatParam|null} + */ +MNN.EltwiseInt8.prototype.inputQuan0 = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new MNN.QuantizedFloatParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.QuantizedFloatParam=} obj + * @returns {MNN.QuantizedFloatParam|null} + */ +MNN.EltwiseInt8.prototype.inputQuan1 = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new MNN.QuantizedFloatParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.QuantizedFloatParam=} obj + * @returns {MNN.QuantizedFloatParam|null} + */ +MNN.EltwiseInt8.prototype.outputQuan = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new MNN.QuantizedFloatParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.EltwiseInt8.startEltwiseInt8 = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.EltwiseType} type + */ +MNN.EltwiseInt8.addType = function(builder, type) { + builder.addFieldInt8(0, type, MNN.EltwiseType.PROD); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputQuan0Offset + */ +MNN.EltwiseInt8.addInputQuan0 = function(builder, inputQuan0Offset) { + builder.addFieldOffset(1, inputQuan0Offset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputQuan1Offset + */ +MNN.EltwiseInt8.addInputQuan1 = function(builder, inputQuan1Offset) { + builder.addFieldOffset(2, inputQuan1Offset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputQuanOffset + */ +MNN.EltwiseInt8.addOutputQuan = function(builder, outputQuanOffset) { + builder.addFieldOffset(3, outputQuanOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.EltwiseInt8.endEltwiseInt8 = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.EltwiseType} type + * @param {flatbuffers.Offset} inputQuan0Offset + * @param {flatbuffers.Offset} inputQuan1Offset + * @param {flatbuffers.Offset} outputQuanOffset + * @returns {flatbuffers.Offset} + */ +MNN.EltwiseInt8.createEltwiseInt8 = function(builder, type, inputQuan0Offset, inputQuan1Offset, outputQuanOffset) { + MNN.EltwiseInt8.startEltwiseInt8(builder); + MNN.EltwiseInt8.addType(builder, type); + MNN.EltwiseInt8.addInputQuan0(builder, inputQuan0Offset); + MNN.EltwiseInt8.addInputQuan1(builder, inputQuan1Offset); + MNN.EltwiseInt8.addOutputQuan(builder, outputQuanOffset); + return 
MNN.EltwiseInt8.endEltwiseInt8(builder); +} + +/** + * @constructor + */ +MNN.BinaryOp = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.BinaryOp} + */ +MNN.BinaryOp.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.BinaryOp=} obj + * @returns {MNN.BinaryOp} + */ +MNN.BinaryOp.getRootAsBinaryOp = function(bb, obj) { + return (obj || new MNN.BinaryOp).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.BinaryOp=} obj + * @returns {MNN.BinaryOp} + */ +MNN.BinaryOp.getSizePrefixedRootAsBinaryOp = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.BinaryOp).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.BinaryOp.prototype.opType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.BinaryOp.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.BinaryOp.startBinaryOp = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} opType + */ +MNN.BinaryOp.addOpType = function(builder, opType) { + builder.addFieldInt32(0, opType, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.BinaryOp.addT = function(builder, T) { + builder.addFieldInt32(1, T, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.BinaryOp.endBinaryOp = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} opType + * @param {MNN.DataType} T + * @returns {flatbuffers.Offset} + */ +MNN.BinaryOp.createBinaryOp = function(builder, opType, T) { + MNN.BinaryOp.startBinaryOp(builder); + MNN.BinaryOp.addOpType(builder, opType); + MNN.BinaryOp.addT(builder, T); + return MNN.BinaryOp.endBinaryOp(builder); +} + +/** + * @constructor + */ +MNN.PackParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.PackParam} + */ +MNN.PackParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.PackParam=} obj + * @returns {MNN.PackParam} + */ +MNN.PackParam.getRootAsPackParam = function(bb, obj) { + return (obj || new MNN.PackParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.PackParam=} obj + * @returns {MNN.PackParam} + */ +MNN.PackParam.getSizePrefixedRootAsPackParam = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.PackParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.PackParam.prototype.dataType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {number} + */ +MNN.PackParam.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.PackParam.startPackParam = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dataType + */ +MNN.PackParam.addDataType = function(builder, dataType) { + builder.addFieldInt32(0, dataType, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.PackParam.addAxis = function(builder, axis) { + builder.addFieldInt32(1, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.PackParam.endPackParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dataType + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +MNN.PackParam.createPackParam = function(builder, dataType, axis) { + MNN.PackParam.startPackParam(builder); + MNN.PackParam.addDataType(builder, dataType); + MNN.PackParam.addAxis(builder, axis); + return MNN.PackParam.endPackParam(builder); +} + +/** + * @constructor + */ +MNN.StridedSliceParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns 
{MNN.StridedSliceParam} + */ +MNN.StridedSliceParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.StridedSliceParam=} obj + * @returns {MNN.StridedSliceParam} + */ +MNN.StridedSliceParam.getRootAsStridedSliceParam = function(bb, obj) { + return (obj || new MNN.StridedSliceParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.StridedSliceParam=} obj + * @returns {MNN.StridedSliceParam} + */ +MNN.StridedSliceParam.getSizePrefixedRootAsStridedSliceParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.StridedSliceParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.StridedSliceParam.prototype.Index = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.StridedSliceParam.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {number} + */ +MNN.StridedSliceParam.prototype.beginMask = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.StridedSliceParam.prototype.endMask = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.StridedSliceParam.prototype.ellipsisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.StridedSliceParam.prototype.newAxisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.StridedSliceParam.prototype.shrinkAxisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.StridedSliceParam.startStridedSliceParam = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Index + */ +MNN.StridedSliceParam.addIndex = function(builder, Index) { + builder.addFieldInt32(0, Index, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.StridedSliceParam.addT = function(builder, T) { + builder.addFieldInt32(1, T, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beginMask + */ +MNN.StridedSliceParam.addBeginMask = function(builder, beginMask) { + builder.addFieldInt32(2, beginMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} endMask + */ +MNN.StridedSliceParam.addEndMask = function(builder, endMask) { + builder.addFieldInt32(3, endMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} ellipsisMask + */ +MNN.StridedSliceParam.addEllipsisMask = function(builder, ellipsisMask) { + builder.addFieldInt32(4, ellipsisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} newAxisMask + */ +MNN.StridedSliceParam.addNewAxisMask = function(builder, newAxisMask) { + builder.addFieldInt32(5, newAxisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} shrinkAxisMask + */ +MNN.StridedSliceParam.addShrinkAxisMask = function(builder, shrinkAxisMask) { 
+ builder.addFieldInt32(6, shrinkAxisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.StridedSliceParam.endStridedSliceParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Index + * @param {MNN.DataType} T + * @param {number} beginMask + * @param {number} endMask + * @param {number} ellipsisMask + * @param {number} newAxisMask + * @param {number} shrinkAxisMask + * @returns {flatbuffers.Offset} + */ +MNN.StridedSliceParam.createStridedSliceParam = function(builder, Index, T, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask) { + MNN.StridedSliceParam.startStridedSliceParam(builder); + MNN.StridedSliceParam.addIndex(builder, Index); + MNN.StridedSliceParam.addT(builder, T); + MNN.StridedSliceParam.addBeginMask(builder, beginMask); + MNN.StridedSliceParam.addEndMask(builder, endMask); + MNN.StridedSliceParam.addEllipsisMask(builder, ellipsisMask); + MNN.StridedSliceParam.addNewAxisMask(builder, newAxisMask); + MNN.StridedSliceParam.addShrinkAxisMask(builder, shrinkAxisMask); + return MNN.StridedSliceParam.endStridedSliceParam(builder); +} + +/** + * @constructor + */ +MNN.SqueezeParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.SqueezeParam} + */ +MNN.SqueezeParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.SqueezeParam=} obj + * @returns {MNN.SqueezeParam} + */ +MNN.SqueezeParam.getRootAsSqueezeParam = function(bb, obj) { + return (obj || new MNN.SqueezeParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.SqueezeParam=} obj + * @returns 
{MNN.SqueezeParam} + */ +MNN.SqueezeParam.getSizePrefixedRootAsSqueezeParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.SqueezeParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.SqueezeParam.prototype.squeezeDims = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.SqueezeParam.prototype.squeezeDimsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.SqueezeParam.prototype.squeezeDimsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.SqueezeParam.startSqueezeParam = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} squeezeDimsOffset + */ +MNN.SqueezeParam.addSqueezeDims = function(builder, squeezeDimsOffset) { + builder.addFieldOffset(0, squeezeDimsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.SqueezeParam.createSqueezeDimsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.SqueezeParam.startSqueezeDimsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param 
{flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.SqueezeParam.endSqueezeParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} squeezeDimsOffset + * @returns {flatbuffers.Offset} + */ +MNN.SqueezeParam.createSqueezeParam = function(builder, squeezeDimsOffset) { + MNN.SqueezeParam.startSqueezeParam(builder); + MNN.SqueezeParam.addSqueezeDims(builder, squeezeDimsOffset); + return MNN.SqueezeParam.endSqueezeParam(builder); +} + +/** + * @constructor + */ +MNN.CastParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.CastParam} + */ +MNN.CastParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.CastParam=} obj + * @returns {MNN.CastParam} + */ +MNN.CastParam.getRootAsCastParam = function(bb, obj) { + return (obj || new MNN.CastParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.CastParam=} obj + * @returns {MNN.CastParam} + */ +MNN.CastParam.getSizePrefixedRootAsCastParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.CastParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.CastParam.prototype.srcT = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.CastParam.prototype.dstT = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.CastParam.startCastParam = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} srcT + */ +MNN.CastParam.addSrcT = function(builder, srcT) { + builder.addFieldInt32(0, srcT, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dstT + */ +MNN.CastParam.addDstT = function(builder, dstT) { + builder.addFieldInt32(1, dstT, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.CastParam.endCastParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} srcT + * @param {MNN.DataType} dstT + * @returns {flatbuffers.Offset} + */ +MNN.CastParam.createCastParam = function(builder, srcT, dstT) { + MNN.CastParam.startCastParam(builder); + MNN.CastParam.addSrcT(builder, srcT); + MNN.CastParam.addDstT(builder, dstT); + return MNN.CastParam.endCastParam(builder); +} + +/** + * @constructor + */ +MNN.ReductionParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.ReductionParam} + */ +MNN.ReductionParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ReductionParam=} obj + * @returns {MNN.ReductionParam} + */ +MNN.ReductionParam.getRootAsReductionParam = function(bb, obj) { + return (obj || new MNN.ReductionParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ReductionParam=} obj + * @returns 
{MNN.ReductionParam} + */ +MNN.ReductionParam.getSizePrefixedRootAsReductionParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.ReductionParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.ReductionType} + */ +MNN.ReductionParam.prototype.operation = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.ReductionType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ReductionType.SUM; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.ReductionParam.prototype.dim = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.ReductionParam.prototype.dimLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.ReductionParam.prototype.dimArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {number} + */ +MNN.ReductionParam.prototype.coeff = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {boolean} + */ +MNN.ReductionParam.prototype.keepDims = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.ReductionParam.prototype.dType = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.ReductionParam.startReductionParam = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ReductionType} operation + */ +MNN.ReductionParam.addOperation = function(builder, operation) { + builder.addFieldInt8(0, operation, MNN.ReductionType.SUM); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimOffset + */ +MNN.ReductionParam.addDim = function(builder, dimOffset) { + builder.addFieldOffset(1, dimOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.ReductionParam.createDimVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.ReductionParam.startDimVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} coeff + */ +MNN.ReductionParam.addCoeff = function(builder, coeff) { + builder.addFieldFloat32(2, coeff, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepDims + */ +MNN.ReductionParam.addKeepDims = function(builder, keepDims) { + builder.addFieldInt8(3, +keepDims, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dType + */ +MNN.ReductionParam.addDType = function(builder, dType) { + builder.addFieldInt32(4, dType, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.ReductionParam.endReductionParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * 
@param {flatbuffers.Builder} builder + * @param {MNN.ReductionType} operation + * @param {flatbuffers.Offset} dimOffset + * @param {number} coeff + * @param {boolean} keepDims + * @param {MNN.DataType} dType + * @returns {flatbuffers.Offset} + */ +MNN.ReductionParam.createReductionParam = function(builder, operation, dimOffset, coeff, keepDims, dType) { + MNN.ReductionParam.startReductionParam(builder); + MNN.ReductionParam.addOperation(builder, operation); + MNN.ReductionParam.addDim(builder, dimOffset); + MNN.ReductionParam.addCoeff(builder, coeff); + MNN.ReductionParam.addKeepDims(builder, keepDims); + MNN.ReductionParam.addDType(builder, dType); + return MNN.ReductionParam.endReductionParam(builder); +} + +/** + * @constructor + */ +MNN.Gather = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Gather} + */ +MNN.Gather.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Gather=} obj + * @returns {MNN.Gather} + */ +MNN.Gather.getRootAsGather = function(bb, obj) { + return (obj || new MNN.Gather).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Gather=} obj + * @returns {MNN.Gather} + */ +MNN.Gather.getSizePrefixedRootAsGather = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Gather).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Gather.prototype.Tindices = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Gather.prototype.Tparams = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {boolean} + */ +MNN.Gather.prototype.validateIndices = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {number} + */ +MNN.Gather.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Gather.startGather = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tindices + */ +MNN.Gather.addTindices = function(builder, Tindices) { + builder.addFieldInt32(0, Tindices, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tparams + */ +MNN.Gather.addTparams = function(builder, Tparams) { + builder.addFieldInt32(1, Tparams, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} validateIndices + */ +MNN.Gather.addValidateIndices = function(builder, validateIndices) { + builder.addFieldInt8(2, +validateIndices, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.Gather.addAxis = function(builder, axis) { + builder.addFieldInt32(3, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Gather.endGather = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tindices + * @param {MNN.DataType} Tparams + * 
@param {boolean} validateIndices + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +MNN.Gather.createGather = function(builder, Tindices, Tparams, validateIndices, axis) { + MNN.Gather.startGather(builder); + MNN.Gather.addTindices(builder, Tindices); + MNN.Gather.addTparams(builder, Tparams); + MNN.Gather.addValidateIndices(builder, validateIndices); + MNN.Gather.addAxis(builder, axis); + return MNN.Gather.endGather(builder); +} + +/** + * @constructor + */ +MNN.ExpandDims = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.ExpandDims} + */ +MNN.ExpandDims.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ExpandDims=} obj + * @returns {MNN.ExpandDims} + */ +MNN.ExpandDims.getRootAsExpandDims = function(bb, obj) { + return (obj || new MNN.ExpandDims).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ExpandDims=} obj + * @returns {MNN.ExpandDims} + */ +MNN.ExpandDims.getSizePrefixedRootAsExpandDims = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.ExpandDims).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.ExpandDims.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.ExpandDims.prototype.Tdim = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {number} + */ +MNN.ExpandDims.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.ExpandDims.startExpandDims = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.ExpandDims.addT = function(builder, T) { + builder.addFieldInt32(0, T, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tdim + */ +MNN.ExpandDims.addTdim = function(builder, Tdim) { + builder.addFieldInt32(1, Tdim, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.ExpandDims.addAxis = function(builder, axis) { + builder.addFieldInt32(2, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.ExpandDims.endExpandDims = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + * @param {MNN.DataType} Tdim + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +MNN.ExpandDims.createExpandDims = function(builder, T, Tdim, axis) { + MNN.ExpandDims.startExpandDims(builder); + MNN.ExpandDims.addT(builder, T); + MNN.ExpandDims.addTdim(builder, Tdim); + MNN.ExpandDims.addAxis(builder, axis); + return MNN.ExpandDims.endExpandDims(builder); +} + +/** + * @constructor + */ +MNN.Selu = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Selu} + */ +MNN.Selu.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return 
this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Selu=} obj + * @returns {MNN.Selu} + */ +MNN.Selu.getRootAsSelu = function(bb, obj) { + return (obj || new MNN.Selu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Selu=} obj + * @returns {MNN.Selu} + */ +MNN.Selu.getSizePrefixedRootAsSelu = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Selu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Selu.prototype.scale = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.Selu.prototype.alpha = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Selu.startSelu = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scale + */ +MNN.Selu.addScale = function(builder, scale) { + builder.addFieldFloat32(0, scale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + */ +MNN.Selu.addAlpha = function(builder, alpha) { + builder.addFieldFloat32(1, alpha, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Selu.endSelu = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scale + * @param {number} alpha + * @returns {flatbuffers.Offset} + */ +MNN.Selu.createSelu = function(builder, scale, alpha) { + MNN.Selu.startSelu(builder); + MNN.Selu.addScale(builder, scale); + MNN.Selu.addAlpha(builder, alpha); + return MNN.Selu.endSelu(builder); +} + +/** + * @constructor + */ +MNN.AsString = 
function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.AsString} + */ +MNN.AsString.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.AsString=} obj + * @returns {MNN.AsString} + */ +MNN.AsString.getRootAsAsString = function(bb, obj) { + return (obj || new MNN.AsString).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.AsString=} obj + * @returns {MNN.AsString} + */ +MNN.AsString.getSizePrefixedRootAsAsString = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.AsString).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.AsString.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {number} + */ +MNN.AsString.prototype.precision = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.AsString.prototype.scientific = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.AsString.prototype.shortest = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {number} + */ +MNN.AsString.prototype.width = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.AsString.prototype.fillString = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.AsString.startAsString = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.AsString.addT = function(builder, T) { + builder.addFieldInt32(0, T, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} precision + */ +MNN.AsString.addPrecision = function(builder, precision) { + builder.addFieldInt32(1, precision, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} scientific + */ +MNN.AsString.addScientific = function(builder, scientific) { + builder.addFieldInt8(2, +scientific, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} shortest + */ +MNN.AsString.addShortest = function(builder, shortest) { + builder.addFieldInt8(3, +shortest, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} width + */ +MNN.AsString.addWidth = function(builder, width) { + builder.addFieldInt32(4, width, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} fillStringOffset + */ +MNN.AsString.addFillString = function(builder, fillStringOffset) { + builder.addFieldOffset(5, fillStringOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.AsString.endAsString = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + * @param {number} precision + * @param {boolean} scientific + * @param {boolean} 
shortest + * @param {number} width + * @param {flatbuffers.Offset} fillStringOffset + * @returns {flatbuffers.Offset} + */ +MNN.AsString.createAsString = function(builder, T, precision, scientific, shortest, width, fillStringOffset) { + MNN.AsString.startAsString(builder); + MNN.AsString.addT(builder, T); + MNN.AsString.addPrecision(builder, precision); + MNN.AsString.addScientific(builder, scientific); + MNN.AsString.addShortest(builder, shortest); + MNN.AsString.addWidth(builder, width); + MNN.AsString.addFillString(builder, fillStringOffset); + return MNN.AsString.endAsString(builder); +} + +/** + * @constructor + */ +MNN.ReduceJoin = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.ReduceJoin} + */ +MNN.ReduceJoin.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ReduceJoin=} obj + * @returns {MNN.ReduceJoin} + */ +MNN.ReduceJoin.getRootAsReduceJoin = function(bb, obj) { + return (obj || new MNN.ReduceJoin).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ReduceJoin=} obj + * @returns {MNN.ReduceJoin} + */ +MNN.ReduceJoin.getSizePrefixedRootAsReduceJoin = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.ReduceJoin).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +MNN.ReduceJoin.prototype.keepDims = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.ReduceJoin.prototype.separator = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.ReduceJoin.startReduceJoin = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepDims + */ +MNN.ReduceJoin.addKeepDims = function(builder, keepDims) { + builder.addFieldInt8(0, +keepDims, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} separatorOffset + */ +MNN.ReduceJoin.addSeparator = function(builder, separatorOffset) { + builder.addFieldOffset(1, separatorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.ReduceJoin.endReduceJoin = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepDims + * @param {flatbuffers.Offset} separatorOffset + * @returns {flatbuffers.Offset} + */ +MNN.ReduceJoin.createReduceJoin = function(builder, keepDims, separatorOffset) { + MNN.ReduceJoin.startReduceJoin(builder); + MNN.ReduceJoin.addKeepDims(builder, keepDims); + MNN.ReduceJoin.addSeparator(builder, separatorOffset); + return MNN.ReduceJoin.endReduceJoin(builder); +} + +/** + * @constructor + */ +MNN.UnaryOp = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.UnaryOp} + */ +MNN.UnaryOp.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{MNN.UnaryOp=} obj + * @returns {MNN.UnaryOp} + */ +MNN.UnaryOp.getRootAsUnaryOp = function(bb, obj) { + return (obj || new MNN.UnaryOp).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.UnaryOp=} obj + * @returns {MNN.UnaryOp} + */ +MNN.UnaryOp.getSizePrefixedRootAsUnaryOp = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.UnaryOp).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.UnaryOpOperation} + */ +MNN.UnaryOp.prototype.opType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.UnaryOpOperation} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.UnaryOpOperation.ABS; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.UnaryOp.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.UnaryOp.startUnaryOp = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.UnaryOpOperation} opType + */ +MNN.UnaryOp.addOpType = function(builder, opType) { + builder.addFieldInt32(0, opType, MNN.UnaryOpOperation.ABS); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.UnaryOp.addT = function(builder, T) { + builder.addFieldInt32(1, T, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.UnaryOp.endUnaryOp = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.UnaryOpOperation} opType + * @param {MNN.DataType} T + * @returns {flatbuffers.Offset} + */ +MNN.UnaryOp.createUnaryOp = function(builder, opType, T) { + 
MNN.UnaryOp.startUnaryOp(builder); + MNN.UnaryOp.addOpType(builder, opType); + MNN.UnaryOp.addT(builder, T); + return MNN.UnaryOp.endUnaryOp(builder); +} + +/** + * @constructor + */ +MNN.TopKV2 = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.TopKV2} + */ +MNN.TopKV2.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TopKV2=} obj + * @returns {MNN.TopKV2} + */ +MNN.TopKV2.getRootAsTopKV2 = function(bb, obj) { + return (obj || new MNN.TopKV2).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TopKV2=} obj + * @returns {MNN.TopKV2} + */ +MNN.TopKV2.getSizePrefixedRootAsTopKV2 = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.TopKV2).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.TopKV2.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @returns {boolean} + */ +MNN.TopKV2.prototype.sorted = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.TopKV2.startTopKV2 = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.TopKV2.addT = function(builder, T) { + builder.addFieldInt32(0, T, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} sorted + */ +MNN.TopKV2.addSorted = function(builder, sorted) { + builder.addFieldInt8(1, +sorted, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.TopKV2.endTopKV2 = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + * @param {boolean} sorted + * @returns {flatbuffers.Offset} + */ +MNN.TopKV2.createTopKV2 = function(builder, T, sorted) { + MNN.TopKV2.startTopKV2(builder); + MNN.TopKV2.addT(builder, T); + MNN.TopKV2.addSorted(builder, sorted); + return MNN.TopKV2.endTopKV2(builder); +} + +/** + * @constructor + */ +MNN.CropAndResize = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.CropAndResize} + */ +MNN.CropAndResize.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.CropAndResize=} obj + * @returns {MNN.CropAndResize} + */ +MNN.CropAndResize.getRootAsCropAndResize = function(bb, obj) { + return (obj || new MNN.CropAndResize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.CropAndResize=} obj + * @returns {MNN.CropAndResize} + */ +MNN.CropAndResize.getSizePrefixedRootAsCropAndResize = function(bb, obj) { + bb.setPosition(bb.position() + 
flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.CropAndResize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.CropAndResize.prototype.extrapolationValue = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {MNN.CropAndResizeMethod} + */ +MNN.CropAndResize.prototype.method = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.CropAndResizeMethod} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.CropAndResizeMethod.BILINEAR; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.CropAndResize.startCropAndResize = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} extrapolationValue + */ +MNN.CropAndResize.addExtrapolationValue = function(builder, extrapolationValue) { + builder.addFieldFloat32(0, extrapolationValue, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.CropAndResizeMethod} method + */ +MNN.CropAndResize.addMethod = function(builder, method) { + builder.addFieldInt8(1, method, MNN.CropAndResizeMethod.BILINEAR); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.CropAndResize.endCropAndResize = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} extrapolationValue + * @param {MNN.CropAndResizeMethod} method + * @returns {flatbuffers.Offset} + */ +MNN.CropAndResize.createCropAndResize = function(builder, extrapolationValue, method) { + MNN.CropAndResize.startCropAndResize(builder); + MNN.CropAndResize.addExtrapolationValue(builder, extrapolationValue); + MNN.CropAndResize.addMethod(builder, method); + return MNN.CropAndResize.endCropAndResize(builder); +} + +/** + * @constructor + */ +MNN.Fill = function() { + /** + * 
@type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Fill} + */ +MNN.Fill.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Fill=} obj + * @returns {MNN.Fill} + */ +MNN.Fill.getRootAsFill = function(bb, obj) { + return (obj || new MNN.Fill).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Fill=} obj + * @returns {MNN.Fill} + */ +MNN.Fill.getSizePrefixedRootAsFill = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Fill).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Fill.startFill = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Fill.endFill = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Fill.createFill = function(builder) { + MNN.Fill.startFill(builder); + return MNN.Fill.endFill(builder); +} + +/** + * @constructor + */ +MNN.GatherV2 = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.GatherV2} + */ +MNN.GatherV2.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GatherV2=} obj + * @returns {MNN.GatherV2} + */ +MNN.GatherV2.getRootAsGatherV2 = function(bb, obj) { + return (obj || new MNN.GatherV2).__init(bb.readInt32(bb.position()) + 
bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GatherV2=} obj + * @returns {MNN.GatherV2} + */ +MNN.GatherV2.getSizePrefixedRootAsGatherV2 = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.GatherV2).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.GatherV2.prototype.Taxis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.GatherV2.prototype.Tindices = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.GatherV2.prototype.Tparams = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.GatherV2.startGatherV2 = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Taxis + */ +MNN.GatherV2.addTaxis = function(builder, Taxis) { + builder.addFieldInt32(0, Taxis, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tindices + */ +MNN.GatherV2.addTindices = function(builder, Tindices) { + builder.addFieldInt32(1, Tindices, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tparams + */ +MNN.GatherV2.addTparams = function(builder, Tparams) { + builder.addFieldInt32(2, Tparams, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.GatherV2.endGatherV2 = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Taxis + * @param {MNN.DataType} Tindices + * @param {MNN.DataType} Tparams + * @returns {flatbuffers.Offset} + */ +MNN.GatherV2.createGatherV2 = function(builder, Taxis, Tindices, Tparams) { + MNN.GatherV2.startGatherV2(builder); + MNN.GatherV2.addTaxis(builder, Taxis); + MNN.GatherV2.addTindices(builder, Tindices); + MNN.GatherV2.addTparams(builder, Tparams); + return MNN.GatherV2.endGatherV2(builder); +} + +/** + * @constructor + */ +MNN.NonMaxSuppressionV2 = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.NonMaxSuppressionV2} + */ +MNN.NonMaxSuppressionV2.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * 
@param {MNN.NonMaxSuppressionV2=} obj + * @returns {MNN.NonMaxSuppressionV2} + */ +MNN.NonMaxSuppressionV2.getRootAsNonMaxSuppressionV2 = function(bb, obj) { + return (obj || new MNN.NonMaxSuppressionV2).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.NonMaxSuppressionV2=} obj + * @returns {MNN.NonMaxSuppressionV2} + */ +MNN.NonMaxSuppressionV2.getSizePrefixedRootAsNonMaxSuppressionV2 = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.NonMaxSuppressionV2).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.NonMaxSuppressionV2.startNonMaxSuppressionV2 = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.NonMaxSuppressionV2.endNonMaxSuppressionV2 = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.NonMaxSuppressionV2.createNonMaxSuppressionV2 = function(builder) { + MNN.NonMaxSuppressionV2.startNonMaxSuppressionV2(builder); + return MNN.NonMaxSuppressionV2.endNonMaxSuppressionV2(builder); +} + +/** + * @constructor + */ +MNN.Range = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Range} + */ +MNN.Range.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Range=} obj + * @returns {MNN.Range} + */ +MNN.Range.getRootAsRange = function(bb, obj) { + return (obj || new MNN.Range).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{MNN.Range=} obj + * @returns {MNN.Range} + */ +MNN.Range.getSizePrefixedRootAsRange = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Range).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Range.prototype.Tidx = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Range.startRange = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tidx + */ +MNN.Range.addTidx = function(builder, Tidx) { + builder.addFieldInt32(0, Tidx, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Range.endRange = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tidx + * @returns {flatbuffers.Offset} + */ +MNN.Range.createRange = function(builder, Tidx) { + MNN.Range.startRange(builder); + MNN.Range.addTidx(builder, Tidx); + return MNN.Range.endRange(builder); +} + +/** + * @constructor + */ +MNN.Rank = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Rank} + */ +MNN.Rank.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Rank=} obj + * @returns {MNN.Rank} + */ +MNN.Rank.getRootAsRank = function(bb, obj) { + return (obj || new MNN.Rank).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Rank=} obj + * @returns 
{MNN.Rank} + */ +MNN.Rank.getSizePrefixedRootAsRank = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Rank).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Rank.startRank = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Rank.endRank = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Rank.createRank = function(builder) { + MNN.Rank.startRank(builder); + return MNN.Rank.endRank(builder); +} + +/** + * @constructor + */ +MNN.Size = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Size} + */ +MNN.Size.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Size=} obj + * @returns {MNN.Size} + */ +MNN.Size.getRootAsSize = function(bb, obj) { + return (obj || new MNN.Size).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Size=} obj + * @returns {MNN.Size} + */ +MNN.Size.getSizePrefixedRootAsSize = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Size).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Size.prototype.outputDataType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Size.startSize = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} outputDataType + */ +MNN.Size.addOutputDataType = function(builder, outputDataType) { + builder.addFieldInt32(0, outputDataType, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Size.endSize = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} outputDataType + * @returns {flatbuffers.Offset} + */ +MNN.Size.createSize = function(builder, outputDataType) { + MNN.Size.startSize(builder); + MNN.Size.addOutputDataType(builder, outputDataType); + return MNN.Size.endSize(builder); +} + +/** + * @constructor + */ +MNN.Transpose = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Transpose} + */ +MNN.Transpose.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Transpose=} obj + * @returns {MNN.Transpose} + */ +MNN.Transpose.getRootAsTranspose = function(bb, obj) { + return (obj || new MNN.Transpose).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Transpose=} obj + * @returns {MNN.Transpose} + */ +MNN.Transpose.getSizePrefixedRootAsTranspose = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Transpose).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ 
+MNN.Transpose.prototype.Tperm = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Transpose.startTranspose = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tperm + */ +MNN.Transpose.addTperm = function(builder, Tperm) { + builder.addFieldInt32(0, Tperm, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Transpose.endTranspose = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} Tperm + * @returns {flatbuffers.Offset} + */ +MNN.Transpose.createTranspose = function(builder, Tperm) { + MNN.Transpose.startTranspose(builder); + MNN.Transpose.addTperm(builder, Tperm); + return MNN.Transpose.endTranspose(builder); +} + +/** + * @constructor + */ +MNN.SliceTf = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.SliceTf} + */ +MNN.SliceTf.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.SliceTf=} obj + * @returns {MNN.SliceTf} + */ +MNN.SliceTf.getRootAsSliceTf = function(bb, obj) { + return (obj || new MNN.SliceTf).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.SliceTf=} obj + * @returns {MNN.SliceTf} + */ +MNN.SliceTf.getSizePrefixedRootAsSliceTf = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.SliceTf).__init(bb.readInt32(bb.position()) + 
bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.SliceTf.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.SliceTf.startSliceTf = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.SliceTf.addT = function(builder, T) { + builder.addFieldInt32(0, T, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.SliceTf.endSliceTf = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + * @returns {flatbuffers.Offset} + */ +MNN.SliceTf.createSliceTf = function(builder, T) { + MNN.SliceTf.startSliceTf(builder); + MNN.SliceTf.addT(builder, T); + return MNN.SliceTf.endSliceTf(builder); +} + +/** + * @constructor + */ +MNN.QuantizeMaxMin = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizeMaxMin} + */ +MNN.QuantizeMaxMin.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizeMaxMin=} obj + * @returns {MNN.QuantizeMaxMin} + */ +MNN.QuantizeMaxMin.getRootAsQuantizeMaxMin = function(bb, obj) { + return (obj || new MNN.QuantizeMaxMin).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizeMaxMin=} obj + * @returns {MNN.QuantizeMaxMin} + */ +MNN.QuantizeMaxMin.getSizePrefixedRootAsQuantizeMaxMin = function(bb, obj) { + bb.setPosition(bb.position() + 
flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizeMaxMin).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.QuantizeMaxMin.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizeMaxMin.startQuantizeMaxMin = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.QuantizeMaxMin.addT = function(builder, T) { + builder.addFieldInt32(0, T, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizeMaxMin.endQuantizeMaxMin = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + * @returns {flatbuffers.Offset} + */ +MNN.QuantizeMaxMin.createQuantizeMaxMin = function(builder, T) { + MNN.QuantizeMaxMin.startQuantizeMaxMin(builder); + MNN.QuantizeMaxMin.addT(builder, T); + return MNN.QuantizeMaxMin.endQuantizeMaxMin(builder); +} + +/** + * @constructor + */ +MNN.Crop = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Crop} + */ +MNN.Crop.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Crop=} obj + * @returns {MNN.Crop} + */ +MNN.Crop.getRootAsCrop = function(bb, obj) { + return (obj || new MNN.Crop).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Crop=} obj + * @returns {MNN.Crop} + */ 
+MNN.Crop.getSizePrefixedRootAsCrop = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Crop).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.Crop.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 2; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Crop.prototype.offset = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Crop.prototype.offsetLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Crop.prototype.offsetArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Crop.startCrop = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.Crop.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offsetOffset + */ +MNN.Crop.addOffset = function(builder, offsetOffset) { + builder.addFieldOffset(1, offsetOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Crop.createOffsetVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Crop.startOffsetVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Crop.endCrop = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + * @param {flatbuffers.Offset} offsetOffset + * @returns {flatbuffers.Offset} + */ +MNN.Crop.createCrop = function(builder, axis, offsetOffset) { + MNN.Crop.startCrop(builder); + MNN.Crop.addAxis(builder, axis); + MNN.Crop.addOffset(builder, offsetOffset); + return MNN.Crop.endCrop(builder); +} + +/** + * @constructor + */ +MNN.SpaceBatch = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.SpaceBatch} 
+ */ +MNN.SpaceBatch.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.SpaceBatch=} obj + * @returns {MNN.SpaceBatch} + */ +MNN.SpaceBatch.getRootAsSpaceBatch = function(bb, obj) { + return (obj || new MNN.SpaceBatch).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.SpaceBatch=} obj + * @returns {MNN.SpaceBatch} + */ +MNN.SpaceBatch.getSizePrefixedRootAsSpaceBatch = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.SpaceBatch).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.SpaceBatch.prototype.blockShape = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.SpaceBatch.prototype.padding = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.SpaceBatch.startSpaceBatch = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blockShapeOffset + */ +MNN.SpaceBatch.addBlockShape = function(builder, blockShapeOffset) { + builder.addFieldOffset(0, blockShapeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} paddingOffset + */ +MNN.SpaceBatch.addPadding = function(builder, paddingOffset) { + builder.addFieldOffset(1, paddingOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.SpaceBatch.endSpaceBatch = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blockShapeOffset + * @param {flatbuffers.Offset} paddingOffset + * @returns {flatbuffers.Offset} + */ +MNN.SpaceBatch.createSpaceBatch = function(builder, blockShapeOffset, paddingOffset) { + MNN.SpaceBatch.startSpaceBatch(builder); + MNN.SpaceBatch.addBlockShape(builder, blockShapeOffset); + MNN.SpaceBatch.addPadding(builder, paddingOffset); + return MNN.SpaceBatch.endSpaceBatch(builder); +} + +/** + * @constructor + */ +MNN.MatMul = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.MatMul} + */ +MNN.MatMul.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.MatMul=} obj + * @returns {MNN.MatMul} + */ +MNN.MatMul.getRootAsMatMul = function(bb, obj) { + return (obj || new MNN.MatMul).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {MNN.MatMul=} obj + * @returns {MNN.MatMul} + */ +MNN.MatMul.getSizePrefixedRootAsMatMul = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.MatMul).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.MatMul.prototype.T = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {boolean} + */ +MNN.MatMul.prototype.transposeA = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.MatMul.prototype.transposeB = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.MatMul.prototype.weight = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.MatMul.prototype.weightLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.MatMul.prototype.weightArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.MatMul.prototype.bias = function(index) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.MatMul.prototype.biasLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.MatMul.prototype.biasArray = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.MatMul.startMatMul = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + */ +MNN.MatMul.addT = function(builder, T) { + builder.addFieldInt32(0, T, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} transposeA + */ +MNN.MatMul.addTransposeA = function(builder, transposeA) { + builder.addFieldInt8(1, +transposeA, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} transposeB + */ +MNN.MatMul.addTransposeB = function(builder, transposeB) { + builder.addFieldInt8(2, +transposeB, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightOffset + */ +MNN.MatMul.addWeight = function(builder, weightOffset) { + builder.addFieldOffset(3, weightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.MatMul.createWeightVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.MatMul.startWeightVector = function(builder, numElems) { + 
builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.MatMul.addBias = function(builder, biasOffset) { + builder.addFieldOffset(4, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.MatMul.createBiasVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.MatMul.startBiasVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.MatMul.endMatMul = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} T + * @param {boolean} transposeA + * @param {boolean} transposeB + * @param {flatbuffers.Offset} weightOffset + * @param {flatbuffers.Offset} biasOffset + * @returns {flatbuffers.Offset} + */ +MNN.MatMul.createMatMul = function(builder, T, transposeA, transposeB, weightOffset, biasOffset) { + MNN.MatMul.startMatMul(builder); + MNN.MatMul.addT(builder, T); + MNN.MatMul.addTransposeA(builder, transposeA); + MNN.MatMul.addTransposeB(builder, transposeB); + MNN.MatMul.addWeight(builder, weightOffset); + MNN.MatMul.addBias(builder, biasOffset); + return MNN.MatMul.endMatMul(builder); +} + +/** + * @constructor + */ +MNN.MomentsParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.MomentsParam} + */ +MNN.MomentsParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + 
+/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.MomentsParam=} obj + * @returns {MNN.MomentsParam} + */ +MNN.MomentsParam.getRootAsMomentsParam = function(bb, obj) { + return (obj || new MNN.MomentsParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.MomentsParam=} obj + * @returns {MNN.MomentsParam} + */ +MNN.MomentsParam.getSizePrefixedRootAsMomentsParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.MomentsParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.MomentsParam.prototype.dim = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.MomentsParam.prototype.dimLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.MomentsParam.prototype.dimArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {boolean} + */ +MNN.MomentsParam.prototype.keepDims = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : true; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.MomentsParam.prototype.dType = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.MomentsParam.startMomentsParam = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimOffset + */ +MNN.MomentsParam.addDim = function(builder, dimOffset) { + builder.addFieldOffset(0, dimOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.MomentsParam.createDimVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.MomentsParam.startDimVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepDims + */ +MNN.MomentsParam.addKeepDims = function(builder, keepDims) { + builder.addFieldInt8(1, +keepDims, +true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dType + */ +MNN.MomentsParam.addDType = function(builder, dType) { + builder.addFieldInt32(2, dType, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.MomentsParam.endMomentsParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimOffset + * @param {boolean} keepDims + * @param {MNN.DataType} dType + * @returns {flatbuffers.Offset} + */ +MNN.MomentsParam.createMomentsParam = function(builder, dimOffset, keepDims, dType) { + MNN.MomentsParam.startMomentsParam(builder); + MNN.MomentsParam.addDim(builder, dimOffset); + MNN.MomentsParam.addKeepDims(builder, keepDims); + 
MNN.MomentsParam.addDType(builder, dType); + return MNN.MomentsParam.endMomentsParam(builder); +} + +/** + * @constructor + */ +MNN.RNNParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.RNNParam} + */ +MNN.RNNParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.RNNParam=} obj + * @returns {MNN.RNNParam} + */ +MNN.RNNParam.getRootAsRNNParam = function(bb, obj) { + return (obj || new MNN.RNNParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.RNNParam=} obj + * @returns {MNN.RNNParam} + */ +MNN.RNNParam.getSizePrefixedRootAsRNNParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.RNNParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.RNNParam.prototype.numUnits = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.RNNParam.prototype.isBidirectionalRNN = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.RNNParam.prototype.keepAllOutputs = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.fwGateWeight = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.fwGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.fwCandidateWeight = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.fwCandidateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.bwGateWeight = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.bwGateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.bwCandidateWeight = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.RNNParam.prototype.bwCandidateBias = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? 
(obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.RNNParam.startRNNParam = function(builder) { + builder.startObject(11); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numUnits + */ +MNN.RNNParam.addNumUnits = function(builder, numUnits) { + builder.addFieldInt32(0, numUnits, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} isBidirectionalRNN + */ +MNN.RNNParam.addIsBidirectionalRNN = function(builder, isBidirectionalRNN) { + builder.addFieldInt8(1, +isBidirectionalRNN, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepAllOutputs + */ +MNN.RNNParam.addKeepAllOutputs = function(builder, keepAllOutputs) { + builder.addFieldInt8(2, +keepAllOutputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} fwGateWeightOffset + */ +MNN.RNNParam.addFwGateWeight = function(builder, fwGateWeightOffset) { + builder.addFieldOffset(3, fwGateWeightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} fwGateBiasOffset + */ +MNN.RNNParam.addFwGateBias = function(builder, fwGateBiasOffset) { + builder.addFieldOffset(4, fwGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} fwCandidateWeightOffset + */ +MNN.RNNParam.addFwCandidateWeight = function(builder, fwCandidateWeightOffset) { + builder.addFieldOffset(5, fwCandidateWeightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} fwCandidateBiasOffset + */ +MNN.RNNParam.addFwCandidateBias = function(builder, fwCandidateBiasOffset) { + builder.addFieldOffset(6, fwCandidateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bwGateWeightOffset + */ +MNN.RNNParam.addBwGateWeight = function(builder, bwGateWeightOffset) { + 
builder.addFieldOffset(7, bwGateWeightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bwGateBiasOffset + */ +MNN.RNNParam.addBwGateBias = function(builder, bwGateBiasOffset) { + builder.addFieldOffset(8, bwGateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bwCandidateWeightOffset + */ +MNN.RNNParam.addBwCandidateWeight = function(builder, bwCandidateWeightOffset) { + builder.addFieldOffset(9, bwCandidateWeightOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bwCandidateBiasOffset + */ +MNN.RNNParam.addBwCandidateBias = function(builder, bwCandidateBiasOffset) { + builder.addFieldOffset(10, bwCandidateBiasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.RNNParam.endRNNParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numUnits + * @param {boolean} isBidirectionalRNN + * @param {boolean} keepAllOutputs + * @param {flatbuffers.Offset} fwGateWeightOffset + * @param {flatbuffers.Offset} fwGateBiasOffset + * @param {flatbuffers.Offset} fwCandidateWeightOffset + * @param {flatbuffers.Offset} fwCandidateBiasOffset + * @param {flatbuffers.Offset} bwGateWeightOffset + * @param {flatbuffers.Offset} bwGateBiasOffset + * @param {flatbuffers.Offset} bwCandidateWeightOffset + * @param {flatbuffers.Offset} bwCandidateBiasOffset + * @returns {flatbuffers.Offset} + */ +MNN.RNNParam.createRNNParam = function(builder, numUnits, isBidirectionalRNN, keepAllOutputs, fwGateWeightOffset, fwGateBiasOffset, fwCandidateWeightOffset, fwCandidateBiasOffset, bwGateWeightOffset, bwGateBiasOffset, bwCandidateWeightOffset, bwCandidateBiasOffset) { + MNN.RNNParam.startRNNParam(builder); + MNN.RNNParam.addNumUnits(builder, numUnits); + MNN.RNNParam.addIsBidirectionalRNN(builder, 
isBidirectionalRNN); + MNN.RNNParam.addKeepAllOutputs(builder, keepAllOutputs); + MNN.RNNParam.addFwGateWeight(builder, fwGateWeightOffset); + MNN.RNNParam.addFwGateBias(builder, fwGateBiasOffset); + MNN.RNNParam.addFwCandidateWeight(builder, fwCandidateWeightOffset); + MNN.RNNParam.addFwCandidateBias(builder, fwCandidateBiasOffset); + MNN.RNNParam.addBwGateWeight(builder, bwGateWeightOffset); + MNN.RNNParam.addBwGateBias(builder, bwGateBiasOffset); + MNN.RNNParam.addBwCandidateWeight(builder, bwCandidateWeightOffset); + MNN.RNNParam.addBwCandidateBias(builder, bwCandidateBiasOffset); + return MNN.RNNParam.endRNNParam(builder); +} + +/** + * @constructor + */ +MNN.BatchMatMulParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.BatchMatMulParam} + */ +MNN.BatchMatMulParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.BatchMatMulParam=} obj + * @returns {MNN.BatchMatMulParam} + */ +MNN.BatchMatMulParam.getRootAsBatchMatMulParam = function(bb, obj) { + return (obj || new MNN.BatchMatMulParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.BatchMatMulParam=} obj + * @returns {MNN.BatchMatMulParam} + */ +MNN.BatchMatMulParam.getSizePrefixedRootAsBatchMatMulParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.BatchMatMulParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +MNN.BatchMatMulParam.prototype.adjX = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.BatchMatMulParam.prototype.adjY = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.BatchMatMulParam.startBatchMatMulParam = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} adjX + */ +MNN.BatchMatMulParam.addAdjX = function(builder, adjX) { + builder.addFieldInt8(0, +adjX, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} adjY + */ +MNN.BatchMatMulParam.addAdjY = function(builder, adjY) { + builder.addFieldInt8(1, +adjY, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.BatchMatMulParam.endBatchMatMulParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} adjX + * @param {boolean} adjY + * @returns {flatbuffers.Offset} + */ +MNN.BatchMatMulParam.createBatchMatMulParam = function(builder, adjX, adjY) { + MNN.BatchMatMulParam.startBatchMatMulParam(builder); + MNN.BatchMatMulParam.addAdjX(builder, adjX); + MNN.BatchMatMulParam.addAdjY(builder, adjY); + return MNN.BatchMatMulParam.endBatchMatMulParam(builder); +} + +/** + * @constructor + */ +MNN.DepthSpaceParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.DepthSpaceParam} + */ +MNN.DepthSpaceParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.DepthSpaceParam=} obj + * @returns {MNN.DepthSpaceParam} + */ +MNN.DepthSpaceParam.getRootAsDepthSpaceParam = 
function(bb, obj) { + return (obj || new MNN.DepthSpaceParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.DepthSpaceParam=} obj + * @returns {MNN.DepthSpaceParam} + */ +MNN.DepthSpaceParam.getSizePrefixedRootAsDepthSpaceParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.DepthSpaceParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.DepthSpaceParam.prototype.blockSize = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.DepthSpaceParam.startDepthSpaceParam = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + */ +MNN.DepthSpaceParam.addBlockSize = function(builder, blockSize) { + builder.addFieldInt32(0, blockSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.DepthSpaceParam.endDepthSpaceParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + * @returns {flatbuffers.Offset} + */ +MNN.DepthSpaceParam.createDepthSpaceParam = function(builder, blockSize) { + MNN.DepthSpaceParam.startDepthSpaceParam(builder); + MNN.DepthSpaceParam.addBlockSize(builder, blockSize); + return MNN.DepthSpaceParam.endDepthSpaceParam(builder); +} + +/** + * @constructor + */ +MNN.ReverseSequenceParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.ReverseSequenceParam} + */ +MNN.ReverseSequenceParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + 
this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ReverseSequenceParam=} obj + * @returns {MNN.ReverseSequenceParam} + */ +MNN.ReverseSequenceParam.getRootAsReverseSequenceParam = function(bb, obj) { + return (obj || new MNN.ReverseSequenceParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.ReverseSequenceParam=} obj + * @returns {MNN.ReverseSequenceParam} + */ +MNN.ReverseSequenceParam.getSizePrefixedRootAsReverseSequenceParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.ReverseSequenceParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.ReverseSequenceParam.prototype.batchDim = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.ReverseSequenceParam.prototype.seqDim = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.ReverseSequenceParam.startReverseSequenceParam = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} batchDim + */ +MNN.ReverseSequenceParam.addBatchDim = function(builder, batchDim) { + builder.addFieldInt32(0, batchDim, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} seqDim + */ +MNN.ReverseSequenceParam.addSeqDim = function(builder, seqDim) { + builder.addFieldInt32(1, seqDim, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.ReverseSequenceParam.endReverseSequenceParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} batchDim + * @param {number} seqDim + * @returns {flatbuffers.Offset} + */ +MNN.ReverseSequenceParam.createReverseSequenceParam = function(builder, batchDim, seqDim) { + MNN.ReverseSequenceParam.startReverseSequenceParam(builder); + MNN.ReverseSequenceParam.addBatchDim(builder, batchDim); + MNN.ReverseSequenceParam.addSeqDim(builder, seqDim); + return MNN.ReverseSequenceParam.endReverseSequenceParam(builder); +} + +/** + * @constructor + */ +MNN.DetectionPostProcessParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.DetectionPostProcessParam} + */ +MNN.DetectionPostProcessParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.DetectionPostProcessParam=} obj + * @returns {MNN.DetectionPostProcessParam} + */ +MNN.DetectionPostProcessParam.getRootAsDetectionPostProcessParam = function(bb, obj) { + return (obj || new 
MNN.DetectionPostProcessParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.DetectionPostProcessParam=} obj + * @returns {MNN.DetectionPostProcessParam} + */ +MNN.DetectionPostProcessParam.getSizePrefixedRootAsDetectionPostProcessParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.DetectionPostProcessParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.maxDetections = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.maxClassesPerDetection = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.detectionsPerClass = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.nmsScoreThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.iouThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.numClasses = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +MNN.DetectionPostProcessParam.prototype.useRegularNMS = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.centerSizeEncoding = function(index) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.DetectionPostProcessParam.prototype.centerSizeEncodingLength = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.DetectionPostProcessParam.prototype.centerSizeEncodingArray = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.DetectionPostProcessParam.startDetectionPostProcessParam = function(builder) { + builder.startObject(8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} maxDetections + */ +MNN.DetectionPostProcessParam.addMaxDetections = function(builder, maxDetections) { + builder.addFieldInt32(0, maxDetections, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} maxClassesPerDetection + */ +MNN.DetectionPostProcessParam.addMaxClassesPerDetection = function(builder, maxClassesPerDetection) { + builder.addFieldInt32(1, maxClassesPerDetection, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} detectionsPerClass + */ +MNN.DetectionPostProcessParam.addDetectionsPerClass = function(builder, detectionsPerClass) { + 
builder.addFieldInt32(2, detectionsPerClass, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} nmsScoreThreshold + */ +MNN.DetectionPostProcessParam.addNmsScoreThreshold = function(builder, nmsScoreThreshold) { + builder.addFieldFloat32(3, nmsScoreThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} iouThreshold + */ +MNN.DetectionPostProcessParam.addIouThreshold = function(builder, iouThreshold) { + builder.addFieldFloat32(4, iouThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numClasses + */ +MNN.DetectionPostProcessParam.addNumClasses = function(builder, numClasses) { + builder.addFieldInt32(5, numClasses, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} useRegularNMS + */ +MNN.DetectionPostProcessParam.addUseRegularNMS = function(builder, useRegularNMS) { + builder.addFieldInt8(6, +useRegularNMS, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} centerSizeEncodingOffset + */ +MNN.DetectionPostProcessParam.addCenterSizeEncoding = function(builder, centerSizeEncodingOffset) { + builder.addFieldOffset(7, centerSizeEncodingOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.DetectionPostProcessParam.createCenterSizeEncodingVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.DetectionPostProcessParam.startCenterSizeEncodingVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.DetectionPostProcessParam.endDetectionPostProcessParam = function(builder) { + var offset = 
builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} maxDetections + * @param {number} maxClassesPerDetection + * @param {number} detectionsPerClass + * @param {number} nmsScoreThreshold + * @param {number} iouThreshold + * @param {number} numClasses + * @param {boolean} useRegularNMS + * @param {flatbuffers.Offset} centerSizeEncodingOffset + * @returns {flatbuffers.Offset} + */ +MNN.DetectionPostProcessParam.createDetectionPostProcessParam = function(builder, maxDetections, maxClassesPerDetection, detectionsPerClass, nmsScoreThreshold, iouThreshold, numClasses, useRegularNMS, centerSizeEncodingOffset) { + MNN.DetectionPostProcessParam.startDetectionPostProcessParam(builder); + MNN.DetectionPostProcessParam.addMaxDetections(builder, maxDetections); + MNN.DetectionPostProcessParam.addMaxClassesPerDetection(builder, maxClassesPerDetection); + MNN.DetectionPostProcessParam.addDetectionsPerClass(builder, detectionsPerClass); + MNN.DetectionPostProcessParam.addNmsScoreThreshold(builder, nmsScoreThreshold); + MNN.DetectionPostProcessParam.addIouThreshold(builder, iouThreshold); + MNN.DetectionPostProcessParam.addNumClasses(builder, numClasses); + MNN.DetectionPostProcessParam.addUseRegularNMS(builder, useRegularNMS); + MNN.DetectionPostProcessParam.addCenterSizeEncoding(builder, centerSizeEncodingOffset); + return MNN.DetectionPostProcessParam.endDetectionPostProcessParam(builder); +} + +/** + * @constructor + */ +MNN.OneHotParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.OneHotParam} + */ +MNN.OneHotParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.OneHotParam=} obj + * @returns {MNN.OneHotParam} + */ 
+MNN.OneHotParam.getRootAsOneHotParam = function(bb, obj) { + return (obj || new MNN.OneHotParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.OneHotParam=} obj + * @returns {MNN.OneHotParam} + */ +MNN.OneHotParam.getSizePrefixedRootAsOneHotParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.OneHotParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.OneHotParam.prototype.dType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_FLOAT; +}; + +/** + * @returns {number} + */ +MNN.OneHotParam.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : -1; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.OneHotParam.startOneHotParam = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dType + */ +MNN.OneHotParam.addDType = function(builder, dType) { + builder.addFieldInt32(0, dType, MNN.DataType.DT_FLOAT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.OneHotParam.addAxis = function(builder, axis) { + builder.addFieldInt32(1, axis, -1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.OneHotParam.endOneHotParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} dType + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +MNN.OneHotParam.createOneHotParam = function(builder, dType, axis) { + MNN.OneHotParam.startOneHotParam(builder); + MNN.OneHotParam.addDType(builder, dType); + 
MNN.OneHotParam.addAxis(builder, axis); + return MNN.OneHotParam.endOneHotParam(builder); +} + +/** + * @constructor + */ +MNN.PadParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.PadParam} + */ +MNN.PadParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.PadParam=} obj + * @returns {MNN.PadParam} + */ +MNN.PadParam.getRootAsPadParam = function(bb, obj) { + return (obj || new MNN.PadParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.PadParam=} obj + * @returns {MNN.PadParam} + */ +MNN.PadParam.getSizePrefixedRootAsPadParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.PadParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.PadValueMode} + */ +MNN.PadParam.prototype.mode = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {MNN.PadValueMode} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PadValueMode.CONSTANT; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.PadParam.startPadParam = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PadValueMode} mode + */ +MNN.PadParam.addMode = function(builder, mode) { + builder.addFieldInt8(0, mode, MNN.PadValueMode.CONSTANT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.PadParam.endPadParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PadValueMode} mode + * @returns {flatbuffers.Offset} + */ +MNN.PadParam.createPadParam = function(builder, mode) { + MNN.PadParam.startPadParam(builder); + MNN.PadParam.addMode(builder, mode); + return MNN.PadParam.endPadParam(builder); +} + +/** + * @constructor + */ +MNN.QuantizedParam = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedParam} + */ +MNN.QuantizedParam.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam} + */ +MNN.QuantizedParam.getRootAsQuantizedParam = function(bb, obj) { + return (obj || new MNN.QuantizedParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam} + */ +MNN.QuantizedParam.getSizePrefixedRootAsQuantizedParam = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedParam).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + 
+/** + * @returns {number} + */ +MNN.QuantizedParam.prototype.zeroPoint = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedParam.prototype.scale = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedParam.startQuantizedParam = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} zeroPoint + */ +MNN.QuantizedParam.addZeroPoint = function(builder, zeroPoint) { + builder.addFieldInt32(0, zeroPoint, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} scale + */ +MNN.QuantizedParam.addScale = function(builder, scale) { + builder.addFieldFloat32(1, scale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedParam.endQuantizedParam = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} zeroPoint + * @param {number} scale + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedParam.createQuantizedParam = function(builder, zeroPoint, scale) { + MNN.QuantizedParam.startQuantizedParam(builder); + MNN.QuantizedParam.addZeroPoint(builder, zeroPoint); + MNN.QuantizedParam.addScale(builder, scale); + return MNN.QuantizedParam.endQuantizedParam(builder); +} + +/** + * @constructor + */ +MNN.QuantizedAdd = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedAdd} + */ +MNN.QuantizedAdd.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedAdd=} obj + * @returns {MNN.QuantizedAdd} + */ +MNN.QuantizedAdd.getRootAsQuantizedAdd = function(bb, obj) { + return (obj || new MNN.QuantizedAdd).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedAdd=} obj + * @returns {MNN.QuantizedAdd} + */ +MNN.QuantizedAdd.getSizePrefixedRootAsQuantizedAdd = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedAdd).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.FusedActivation} + */ +MNN.QuantizedAdd.prototype.activationType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.FusedActivation} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.FusedActivation.kTfLiteActNone; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.QuantizedAdd.prototype.input1QuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.QuantizedAdd.prototype.input2QuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.QuantizedAdd.prototype.outputQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedAdd.startQuantizedAdd = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.FusedActivation} activationType + */ +MNN.QuantizedAdd.addActivationType = function(builder, activationType) { + builder.addFieldInt8(0, activationType, MNN.FusedActivation.kTfLiteActNone); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} input1QuantizedParamOffset + */ +MNN.QuantizedAdd.addInput1QuantizedParam = function(builder, input1QuantizedParamOffset) { + builder.addFieldOffset(1, input1QuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} input2QuantizedParamOffset + */ +MNN.QuantizedAdd.addInput2QuantizedParam = function(builder, input2QuantizedParamOffset) { + builder.addFieldOffset(2, input2QuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputQuantizedParamOffset + */ +MNN.QuantizedAdd.addOutputQuantizedParam = function(builder, outputQuantizedParamOffset) { + builder.addFieldOffset(3, outputQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedAdd.endQuantizedAdd = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.FusedActivation} activationType + * @param {flatbuffers.Offset} input1QuantizedParamOffset + * @param {flatbuffers.Offset} input2QuantizedParamOffset + * @param {flatbuffers.Offset} outputQuantizedParamOffset + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedAdd.createQuantizedAdd = function(builder, activationType, input1QuantizedParamOffset, input2QuantizedParamOffset, outputQuantizedParamOffset) { + 
MNN.QuantizedAdd.startQuantizedAdd(builder); + MNN.QuantizedAdd.addActivationType(builder, activationType); + MNN.QuantizedAdd.addInput1QuantizedParam(builder, input1QuantizedParamOffset); + MNN.QuantizedAdd.addInput2QuantizedParam(builder, input2QuantizedParamOffset); + MNN.QuantizedAdd.addOutputQuantizedParam(builder, outputQuantizedParamOffset); + return MNN.QuantizedAdd.endQuantizedAdd(builder); +} + +/** + * @constructor + */ +MNN.Dequantize = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Dequantize} + */ +MNN.Dequantize.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Dequantize=} obj + * @returns {MNN.Dequantize} + */ +MNN.Dequantize.getRootAsDequantize = function(bb, obj) { + return (obj || new MNN.Dequantize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Dequantize=} obj + * @returns {MNN.Dequantize} + */ +MNN.Dequantize.getSizePrefixedRootAsDequantize = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Dequantize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.Dequantize.prototype.inputQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {MNN.QuantizeMode} + */ +MNN.Dequantize.prototype.mode = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {MNN.QuantizeMode} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.QuantizeMode.MIN_COMBINED; +}; + +/** + * @returns {MNN.ModeFormat} + */ +MNN.Dequantize.prototype.modelFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {MNN.ModeFormat} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ModeFormat.TENSORFLOW; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.Dequantize.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Dequantize.startDequantize = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputQuantizedParamOffset + */ +MNN.Dequantize.addInputQuantizedParam = function(builder, inputQuantizedParamOffset) { + builder.addFieldOffset(0, inputQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.QuantizeMode} mode + */ +MNN.Dequantize.addMode = function(builder, mode) { + builder.addFieldInt8(1, mode, MNN.QuantizeMode.MIN_COMBINED); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ModeFormat} modelFormat + */ +MNN.Dequantize.addModelFormat = function(builder, modelFormat) { + builder.addFieldInt8(2, modelFormat, MNN.ModeFormat.TENSORFLOW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + */ +MNN.Dequantize.addType = function(builder, type) { + builder.addFieldInt32(3, type, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Dequantize.endDequantize = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputQuantizedParamOffset + * @param {MNN.QuantizeMode} 
mode + * @param {MNN.ModeFormat} modelFormat + * @param {MNN.DataType} type + * @returns {flatbuffers.Offset} + */ +MNN.Dequantize.createDequantize = function(builder, inputQuantizedParamOffset, mode, modelFormat, type) { + MNN.Dequantize.startDequantize(builder); + MNN.Dequantize.addInputQuantizedParam(builder, inputQuantizedParamOffset); + MNN.Dequantize.addMode(builder, mode); + MNN.Dequantize.addModelFormat(builder, modelFormat); + MNN.Dequantize.addType(builder, type); + return MNN.Dequantize.endDequantize(builder); +} + +/** + * @constructor + */ +MNN.QuantizedAvgPool = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedAvgPool} + */ +MNN.QuantizedAvgPool.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedAvgPool=} obj + * @returns {MNN.QuantizedAvgPool} + */ +MNN.QuantizedAvgPool.getRootAsQuantizedAvgPool = function(bb, obj) { + return (obj || new MNN.QuantizedAvgPool).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedAvgPool=} obj + * @returns {MNN.QuantizedAvgPool} + */ +MNN.QuantizedAvgPool.getSizePrefixedRootAsQuantizedAvgPool = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedAvgPool).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.kernelX = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.kernelY = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.ModeFormat} + */ +MNN.QuantizedAvgPool.prototype.modelFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {MNN.ModeFormat} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ModeFormat.TENSORFLOW; +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.outputActivationMax = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.outputActivationMin = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.PoolPadType} + */ +MNN.QuantizedAvgPool.prototype.padType = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? /** @type {MNN.PoolPadType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PoolPadType.CAFFE; +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.padX = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.padY = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedAvgPool.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.QuantizedAvgPool.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedAvgPool.startQuantizedAvgPool = function(builder) { + builder.startObject(11); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelX + */ +MNN.QuantizedAvgPool.addKernelX = function(builder, kernelX) { + builder.addFieldInt32(0, kernelX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelY + */ +MNN.QuantizedAvgPool.addKernelY = function(builder, kernelY) { + builder.addFieldInt32(1, kernelY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ModeFormat} modelFormat + */ +MNN.QuantizedAvgPool.addModelFormat = function(builder, modelFormat) { + builder.addFieldInt8(2, modelFormat, MNN.ModeFormat.TENSORFLOW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputActivationMax + */ +MNN.QuantizedAvgPool.addOutputActivationMax = function(builder, outputActivationMax) { + builder.addFieldInt32(3, outputActivationMax, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputActivationMin + */ +MNN.QuantizedAvgPool.addOutputActivationMin = function(builder, outputActivationMin) { + builder.addFieldInt32(4, outputActivationMin, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PoolPadType} padType + */ +MNN.QuantizedAvgPool.addPadType = function(builder, padType) { + builder.addFieldInt8(5, padType, MNN.PoolPadType.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padX + */ +MNN.QuantizedAvgPool.addPadX = function(builder, padX) { + builder.addFieldInt32(6, padX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * 
@param {number} padY + */ +MNN.QuantizedAvgPool.addPadY = function(builder, padY) { + builder.addFieldInt32(7, padY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +MNN.QuantizedAvgPool.addStrideX = function(builder, strideX) { + builder.addFieldInt32(8, strideX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +MNN.QuantizedAvgPool.addStrideY = function(builder, strideY) { + builder.addFieldInt32(9, strideY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + */ +MNN.QuantizedAvgPool.addType = function(builder, type) { + builder.addFieldInt32(10, type, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedAvgPool.endQuantizedAvgPool = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelX + * @param {number} kernelY + * @param {MNN.ModeFormat} modelFormat + * @param {number} outputActivationMax + * @param {number} outputActivationMin + * @param {MNN.PoolPadType} padType + * @param {number} padX + * @param {number} padY + * @param {number} strideX + * @param {number} strideY + * @param {MNN.DataType} type + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedAvgPool.createQuantizedAvgPool = function(builder, kernelX, kernelY, modelFormat, outputActivationMax, outputActivationMin, padType, padX, padY, strideX, strideY, type) { + MNN.QuantizedAvgPool.startQuantizedAvgPool(builder); + MNN.QuantizedAvgPool.addKernelX(builder, kernelX); + MNN.QuantizedAvgPool.addKernelY(builder, kernelY); + MNN.QuantizedAvgPool.addModelFormat(builder, modelFormat); + MNN.QuantizedAvgPool.addOutputActivationMax(builder, outputActivationMax); + MNN.QuantizedAvgPool.addOutputActivationMin(builder, outputActivationMin); + MNN.QuantizedAvgPool.addPadType(builder, padType); + 
MNN.QuantizedAvgPool.addPadX(builder, padX); + MNN.QuantizedAvgPool.addPadY(builder, padY); + MNN.QuantizedAvgPool.addStrideX(builder, strideX); + MNN.QuantizedAvgPool.addStrideY(builder, strideY); + MNN.QuantizedAvgPool.addType(builder, type); + return MNN.QuantizedAvgPool.endQuantizedAvgPool(builder); +} + +/** + * @constructor + */ +MNN.QuantizedBiasAdd = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedBiasAdd} + */ +MNN.QuantizedBiasAdd.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedBiasAdd=} obj + * @returns {MNN.QuantizedBiasAdd} + */ +MNN.QuantizedBiasAdd.getRootAsQuantizedBiasAdd = function(bb, obj) { + return (obj || new MNN.QuantizedBiasAdd).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedBiasAdd=} obj + * @returns {MNN.QuantizedBiasAdd} + */ +MNN.QuantizedBiasAdd.getSizePrefixedRootAsQuantizedBiasAdd = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedBiasAdd).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedBiasAdd.prototype.bias = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedBiasAdd.prototype.biasLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.QuantizedBiasAdd.prototype.biasArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.QuantizedBiasAdd.prototype.inputType = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {number} + */ +MNN.QuantizedBiasAdd.prototype.max = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedBiasAdd.prototype.min = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.QuantizedBiasAdd.prototype.outputType = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
/** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedBiasAdd.startQuantizedBiasAdd = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.QuantizedBiasAdd.addBias = function(builder, biasOffset) { + builder.addFieldOffset(0, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedBiasAdd.createBiasVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedBiasAdd.startBiasVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} inputType + */ +MNN.QuantizedBiasAdd.addInputType = function(builder, inputType) { + builder.addFieldInt32(1, inputType, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} max + */ +MNN.QuantizedBiasAdd.addMax = function(builder, max) { + builder.addFieldInt32(2, max, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} min + */ +MNN.QuantizedBiasAdd.addMin = function(builder, min) { + builder.addFieldInt32(3, min, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} outputType + */ +MNN.QuantizedBiasAdd.addOutputType = function(builder, outputType) { + builder.addFieldInt32(4, outputType, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedBiasAdd.endQuantizedBiasAdd = function(builder) { + var offset = builder.endObject(); + return offset; +}; + 
+/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + * @param {MNN.DataType} inputType + * @param {number} max + * @param {number} min + * @param {MNN.DataType} outputType + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedBiasAdd.createQuantizedBiasAdd = function(builder, biasOffset, inputType, max, min, outputType) { + MNN.QuantizedBiasAdd.startQuantizedBiasAdd(builder); + MNN.QuantizedBiasAdd.addBias(builder, biasOffset); + MNN.QuantizedBiasAdd.addInputType(builder, inputType); + MNN.QuantizedBiasAdd.addMax(builder, max); + MNN.QuantizedBiasAdd.addMin(builder, min); + MNN.QuantizedBiasAdd.addOutputType(builder, outputType); + return MNN.QuantizedBiasAdd.endQuantizedBiasAdd(builder); +} + +/** + * @constructor + */ +MNN.QuantizedConcat = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedConcat} + */ +MNN.QuantizedConcat.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedConcat=} obj + * @returns {MNN.QuantizedConcat} + */ +MNN.QuantizedConcat.getRootAsQuantizedConcat = function(bb, obj) { + return (obj || new MNN.QuantizedConcat).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedConcat=} obj + * @returns {MNN.QuantizedConcat} + */ +MNN.QuantizedConcat.getSizePrefixedRootAsQuantizedConcat = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedConcat).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.FusedActivation} + */ +MNN.QuantizedConcat.prototype.activationType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {MNN.FusedActivation} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.FusedActivation.kTfLiteActNone; +}; + +/** + * @returns {number} + */ +MNN.QuantizedConcat.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedConcat.prototype.inputScale = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedConcat.prototype.inputScaleLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +MNN.QuantizedConcat.prototype.inputScaleArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedConcat.prototype.inputZeroPoint = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedConcat.prototype.inputZeroPointLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.QuantizedConcat.prototype.inputZeroPointArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.QuantizedConcat.prototype.outputQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedConcat.startQuantizedConcat = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.FusedActivation} activationType + */ +MNN.QuantizedConcat.addActivationType = function(builder, activationType) { + builder.addFieldInt8(0, activationType, MNN.FusedActivation.kTfLiteActNone); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +MNN.QuantizedConcat.addAxis = function(builder, axis) { + builder.addFieldInt32(1, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputScaleOffset + */ +MNN.QuantizedConcat.addInputScale = function(builder, inputScaleOffset) { + builder.addFieldOffset(2, inputScaleOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedConcat.createInputScaleVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedConcat.startInputScaleVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputZeroPointOffset + */ +MNN.QuantizedConcat.addInputZeroPoint = 
function(builder, inputZeroPointOffset) { + builder.addFieldOffset(3, inputZeroPointOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedConcat.createInputZeroPointVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedConcat.startInputZeroPointVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputQuantizedParamOffset + */ +MNN.QuantizedConcat.addOutputQuantizedParam = function(builder, outputQuantizedParamOffset) { + builder.addFieldOffset(4, outputQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedConcat.endQuantizedConcat = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.FusedActivation} activationType + * @param {number} axis + * @param {flatbuffers.Offset} inputScaleOffset + * @param {flatbuffers.Offset} inputZeroPointOffset + * @param {flatbuffers.Offset} outputQuantizedParamOffset + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedConcat.createQuantizedConcat = function(builder, activationType, axis, inputScaleOffset, inputZeroPointOffset, outputQuantizedParamOffset) { + MNN.QuantizedConcat.startQuantizedConcat(builder); + MNN.QuantizedConcat.addActivationType(builder, activationType); + MNN.QuantizedConcat.addAxis(builder, axis); + MNN.QuantizedConcat.addInputScale(builder, inputScaleOffset); + MNN.QuantizedConcat.addInputZeroPoint(builder, inputZeroPointOffset); + MNN.QuantizedConcat.addOutputQuantizedParam(builder, outputQuantizedParamOffset); + return 
MNN.QuantizedConcat.endQuantizedConcat(builder); +} + +/** + * @constructor + */ +MNN.QuantizedLogistic = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedLogistic} + */ +MNN.QuantizedLogistic.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedLogistic=} obj + * @returns {MNN.QuantizedLogistic} + */ +MNN.QuantizedLogistic.getRootAsQuantizedLogistic = function(bb, obj) { + return (obj || new MNN.QuantizedLogistic).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedLogistic=} obj + * @returns {MNN.QuantizedLogistic} + */ +MNN.QuantizedLogistic.getSizePrefixedRootAsQuantizedLogistic = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedLogistic).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.QuantizedLogistic.prototype.inputQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.QuantizedLogistic.prototype.outputQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedLogistic.startQuantizedLogistic = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputQuantizedParamOffset + */ +MNN.QuantizedLogistic.addInputQuantizedParam = function(builder, inputQuantizedParamOffset) { + builder.addFieldOffset(0, inputQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputQuantizedParamOffset + */ +MNN.QuantizedLogistic.addOutputQuantizedParam = function(builder, outputQuantizedParamOffset) { + builder.addFieldOffset(1, outputQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedLogistic.endQuantizedLogistic = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputQuantizedParamOffset + * @param {flatbuffers.Offset} outputQuantizedParamOffset + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedLogistic.createQuantizedLogistic = function(builder, inputQuantizedParamOffset, outputQuantizedParamOffset) { + MNN.QuantizedLogistic.startQuantizedLogistic(builder); + MNN.QuantizedLogistic.addInputQuantizedParam(builder, inputQuantizedParamOffset); + MNN.QuantizedLogistic.addOutputQuantizedParam(builder, outputQuantizedParamOffset); + return MNN.QuantizedLogistic.endQuantizedLogistic(builder); +} + +/** + * @constructor + */ +MNN.QuantizedMatMul = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedMatMul} + */ +MNN.QuantizedMatMul.prototype.__init = function(i, bb) { + this.bb_pos = i; + 
this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedMatMul=} obj + * @returns {MNN.QuantizedMatMul} + */ +MNN.QuantizedMatMul.getRootAsQuantizedMatMul = function(bb, obj) { + return (obj || new MNN.QuantizedMatMul).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedMatMul=} obj + * @returns {MNN.QuantizedMatMul} + */ +MNN.QuantizedMatMul.getSizePrefixedRootAsQuantizedMatMul = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedMatMul).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +MNN.QuantizedMatMul.prototype.transposeA = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +MNN.QuantizedMatMul.prototype.transposeB = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedMatMul.startQuantizedMatMul = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} transposeA + */ +MNN.QuantizedMatMul.addTransposeA = function(builder, transposeA) { + builder.addFieldInt8(0, +transposeA, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} transposeB + */ +MNN.QuantizedMatMul.addTransposeB = function(builder, transposeB) { + builder.addFieldInt8(1, +transposeB, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedMatMul.endQuantizedMatMul = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} transposeA + * @param {boolean} transposeB + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedMatMul.createQuantizedMatMul = function(builder, transposeA, transposeB) { + MNN.QuantizedMatMul.startQuantizedMatMul(builder); + MNN.QuantizedMatMul.addTransposeA(builder, transposeA); + MNN.QuantizedMatMul.addTransposeB(builder, transposeB); + return MNN.QuantizedMatMul.endQuantizedMatMul(builder); +} + +/** + * @constructor + */ +MNN.QuantizedMaxPool = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedMaxPool} + */ +MNN.QuantizedMaxPool.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedMaxPool=} obj + * @returns {MNN.QuantizedMaxPool} + */ +MNN.QuantizedMaxPool.getRootAsQuantizedMaxPool = function(bb, obj) { + return (obj || new MNN.QuantizedMaxPool).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + 
+/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedMaxPool=} obj + * @returns {MNN.QuantizedMaxPool} + */ +MNN.QuantizedMaxPool.getSizePrefixedRootAsQuantizedMaxPool = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedMaxPool).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.kernelX = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.kernelY = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.ModeFormat} + */ +MNN.QuantizedMaxPool.prototype.modelFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {MNN.ModeFormat} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ModeFormat.TENSORFLOW; +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.outputActivationMax = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.outputActivationMin = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.PoolPadType} + */ +MNN.QuantizedMaxPool.prototype.padType = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? /** @type {MNN.PoolPadType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.PoolPadType.CAFFE; +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.padX = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.padY = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.strideX = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedMaxPool.prototype.strideY = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.DataType} + */ +MNN.QuantizedMaxPool.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedMaxPool.startQuantizedMaxPool = function(builder) { + builder.startObject(11); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelX + */ +MNN.QuantizedMaxPool.addKernelX = function(builder, kernelX) { + builder.addFieldInt32(0, kernelX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelY + */ +MNN.QuantizedMaxPool.addKernelY = function(builder, kernelY) { + builder.addFieldInt32(1, kernelY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ModeFormat} modelFormat + */ +MNN.QuantizedMaxPool.addModelFormat = function(builder, modelFormat) { + builder.addFieldInt8(2, modelFormat, MNN.ModeFormat.TENSORFLOW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outputActivationMax + */ +MNN.QuantizedMaxPool.addOutputActivationMax = function(builder, outputActivationMax) { + builder.addFieldInt32(3, outputActivationMax, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} 
outputActivationMin + */ +MNN.QuantizedMaxPool.addOutputActivationMin = function(builder, outputActivationMin) { + builder.addFieldInt32(4, outputActivationMin, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.PoolPadType} padType + */ +MNN.QuantizedMaxPool.addPadType = function(builder, padType) { + builder.addFieldInt8(5, padType, MNN.PoolPadType.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padX + */ +MNN.QuantizedMaxPool.addPadX = function(builder, padX) { + builder.addFieldInt32(6, padX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} padY + */ +MNN.QuantizedMaxPool.addPadY = function(builder, padY) { + builder.addFieldInt32(7, padY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideX + */ +MNN.QuantizedMaxPool.addStrideX = function(builder, strideX) { + builder.addFieldInt32(8, strideX, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideY + */ +MNN.QuantizedMaxPool.addStrideY = function(builder, strideY) { + builder.addFieldInt32(9, strideY, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + */ +MNN.QuantizedMaxPool.addType = function(builder, type) { + builder.addFieldInt32(10, type, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedMaxPool.endQuantizedMaxPool = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} kernelX + * @param {number} kernelY + * @param {MNN.ModeFormat} modelFormat + * @param {number} outputActivationMax + * @param {number} outputActivationMin + * @param {MNN.PoolPadType} padType + * @param {number} padX + * @param {number} padY + * @param {number} strideX + * @param {number} strideY + * @param {MNN.DataType} type + * @returns {flatbuffers.Offset} + */ 
+MNN.QuantizedMaxPool.createQuantizedMaxPool = function(builder, kernelX, kernelY, modelFormat, outputActivationMax, outputActivationMin, padType, padX, padY, strideX, strideY, type) { + MNN.QuantizedMaxPool.startQuantizedMaxPool(builder); + MNN.QuantizedMaxPool.addKernelX(builder, kernelX); + MNN.QuantizedMaxPool.addKernelY(builder, kernelY); + MNN.QuantizedMaxPool.addModelFormat(builder, modelFormat); + MNN.QuantizedMaxPool.addOutputActivationMax(builder, outputActivationMax); + MNN.QuantizedMaxPool.addOutputActivationMin(builder, outputActivationMin); + MNN.QuantizedMaxPool.addPadType(builder, padType); + MNN.QuantizedMaxPool.addPadX(builder, padX); + MNN.QuantizedMaxPool.addPadY(builder, padY); + MNN.QuantizedMaxPool.addStrideX(builder, strideX); + MNN.QuantizedMaxPool.addStrideY(builder, strideY); + MNN.QuantizedMaxPool.addType(builder, type); + return MNN.QuantizedMaxPool.endQuantizedMaxPool(builder); +} + +/** + * @constructor + */ +MNN.QuantizedRelu = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedRelu} + */ +MNN.QuantizedRelu.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedRelu=} obj + * @returns {MNN.QuantizedRelu} + */ +MNN.QuantizedRelu.getRootAsQuantizedRelu = function(bb, obj) { + return (obj || new MNN.QuantizedRelu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedRelu=} obj + * @returns {MNN.QuantizedRelu} + */ +MNN.QuantizedRelu.getSizePrefixedRootAsQuantizedRelu = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedRelu).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns 
{MNN.DataType} + */ +MNN.QuantizedRelu.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedRelu.startQuantizedRelu = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + */ +MNN.QuantizedRelu.addType = function(builder, type) { + builder.addFieldInt32(0, type, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedRelu.endQuantizedRelu = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedRelu.createQuantizedRelu = function(builder, type) { + MNN.QuantizedRelu.startQuantizedRelu(builder); + MNN.QuantizedRelu.addType(builder, type); + return MNN.QuantizedRelu.endQuantizedRelu(builder); +} + +/** + * @constructor + */ +MNN.QuantizedRelu6 = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedRelu6} + */ +MNN.QuantizedRelu6.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedRelu6=} obj + * @returns {MNN.QuantizedRelu6} + */ +MNN.QuantizedRelu6.getRootAsQuantizedRelu6 = function(bb, obj) { + return (obj || new MNN.QuantizedRelu6).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedRelu6=} obj + * @returns {MNN.QuantizedRelu6} + */ +MNN.QuantizedRelu6.getSizePrefixedRootAsQuantizedRelu6 = 
function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedRelu6).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.QuantizedRelu6.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedRelu6.startQuantizedRelu6 = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + */ +MNN.QuantizedRelu6.addType = function(builder, type) { + builder.addFieldInt32(0, type, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedRelu6.endQuantizedRelu6 = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedRelu6.createQuantizedRelu6 = function(builder, type) { + MNN.QuantizedRelu6.startQuantizedRelu6(builder); + MNN.QuantizedRelu6.addType(builder, type); + return MNN.QuantizedRelu6.endQuantizedRelu6(builder); +} + +/** + * @constructor + */ +MNN.QuantizedReshape = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedReshape} + */ +MNN.QuantizedReshape.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedReshape=} obj + * @returns {MNN.QuantizedReshape} + */ +MNN.QuantizedReshape.getRootAsQuantizedReshape = function(bb, obj) { + return (obj || new 
MNN.QuantizedReshape).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedReshape=} obj + * @returns {MNN.QuantizedReshape} + */ +MNN.QuantizedReshape.getSizePrefixedRootAsQuantizedReshape = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedReshape).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.QuantizedReshape.prototype.dims = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedReshape.prototype.dimsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.QuantizedReshape.prototype.dimsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.ModeFormat} + */ +MNN.QuantizedReshape.prototype.modelFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {MNN.ModeFormat} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ModeFormat.TENSORFLOW; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedReshape.startQuantizedReshape = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + */ +MNN.QuantizedReshape.addDims = function(builder, dimsOffset) { + builder.addFieldOffset(0, dimsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedReshape.createDimsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.QuantizedReshape.startDimsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ModeFormat} modelFormat + */ +MNN.QuantizedReshape.addModelFormat = function(builder, modelFormat) { + builder.addFieldInt8(1, modelFormat, MNN.ModeFormat.TENSORFLOW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedReshape.endQuantizedReshape = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimsOffset + * @param {MNN.ModeFormat} modelFormat + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedReshape.createQuantizedReshape = function(builder, dimsOffset, modelFormat) { + MNN.QuantizedReshape.startQuantizedReshape(builder); + MNN.QuantizedReshape.addDims(builder, dimsOffset); + MNN.QuantizedReshape.addModelFormat(builder, modelFormat); + return MNN.QuantizedReshape.endQuantizedReshape(builder); +} + +/** + * @constructor + */ +MNN.QuantizedSoftmax = function() { + 
/** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizedSoftmax} + */ +MNN.QuantizedSoftmax.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedSoftmax=} obj + * @returns {MNN.QuantizedSoftmax} + */ +MNN.QuantizedSoftmax.getRootAsQuantizedSoftmax = function(bb, obj) { + return (obj || new MNN.QuantizedSoftmax).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizedSoftmax=} obj + * @returns {MNN.QuantizedSoftmax} + */ +MNN.QuantizedSoftmax.getSizePrefixedRootAsQuantizedSoftmax = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizedSoftmax).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +MNN.QuantizedSoftmax.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +MNN.QuantizedSoftmax.prototype.inputScale = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizedSoftmax.startQuantizedSoftmax = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +MNN.QuantizedSoftmax.addBeta = function(builder, beta) { + builder.addFieldFloat32(0, beta, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} inputScale + */ +MNN.QuantizedSoftmax.addInputScale = function(builder, inputScale) { + builder.addFieldFloat32(1, inputScale, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedSoftmax.endQuantizedSoftmax = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + * @param {number} inputScale + * @returns {flatbuffers.Offset} + */ +MNN.QuantizedSoftmax.createQuantizedSoftmax = function(builder, beta, inputScale) { + MNN.QuantizedSoftmax.startQuantizedSoftmax(builder); + MNN.QuantizedSoftmax.addBeta(builder, beta); + MNN.QuantizedSoftmax.addInputScale(builder, inputScale); + return MNN.QuantizedSoftmax.endQuantizedSoftmax(builder); +} + +/** + * @constructor + */ +MNN.QuantizeV2 = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.QuantizeV2} + */ +MNN.QuantizeV2.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizeV2=} obj + * @returns {MNN.QuantizeV2} + */ +MNN.QuantizeV2.getRootAsQuantizeV2 = function(bb, obj) { + return (obj || new MNN.QuantizeV2).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.QuantizeV2=} obj + * @returns 
{MNN.QuantizeV2} + */ +MNN.QuantizeV2.getSizePrefixedRootAsQuantizeV2 = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.QuantizeV2).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.DataType} + */ +MNN.QuantizeV2.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.DataType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.DataType.DT_INVALID; +}; + +/** + * @returns {MNN.QuantizeMode} + */ +MNN.QuantizeV2.prototype.mode = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.QuantizeMode} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.QuantizeMode.MIN_COMBINED; +}; + +/** + * @returns {MNN.QuantizeRoundMode} + */ +MNN.QuantizeV2.prototype.roundMode = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {MNN.QuantizeRoundMode} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.QuantizeRoundMode.HALF_AWAY_FROM_ZERO; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.QuantizeV2.startQuantizeV2 = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + */ +MNN.QuantizeV2.addType = function(builder, type) { + builder.addFieldInt32(0, type, MNN.DataType.DT_INVALID); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.QuantizeMode} mode + */ +MNN.QuantizeV2.addMode = function(builder, mode) { + builder.addFieldInt8(1, mode, MNN.QuantizeMode.MIN_COMBINED); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.QuantizeRoundMode} roundMode + */ +MNN.QuantizeV2.addRoundMode = function(builder, roundMode) { + builder.addFieldInt8(2, roundMode, MNN.QuantizeRoundMode.HALF_AWAY_FROM_ZERO); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.QuantizeV2.endQuantizeV2 = function(builder) { 
+ var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.DataType} type + * @param {MNN.QuantizeMode} mode + * @param {MNN.QuantizeRoundMode} roundMode + * @returns {flatbuffers.Offset} + */ +MNN.QuantizeV2.createQuantizeV2 = function(builder, type, mode, roundMode) { + MNN.QuantizeV2.startQuantizeV2(builder); + MNN.QuantizeV2.addType(builder, type); + MNN.QuantizeV2.addMode(builder, mode); + MNN.QuantizeV2.addRoundMode(builder, roundMode); + return MNN.QuantizeV2.endQuantizeV2(builder); +} + +/** + * @constructor + */ +MNN.RequantizationRange = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.RequantizationRange} + */ +MNN.RequantizationRange.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.RequantizationRange=} obj + * @returns {MNN.RequantizationRange} + */ +MNN.RequantizationRange.getRootAsRequantizationRange = function(bb, obj) { + return (obj || new MNN.RequantizationRange).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.RequantizationRange=} obj + * @returns {MNN.RequantizationRange} + */ +MNN.RequantizationRange.getSizePrefixedRootAsRequantizationRange = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.RequantizationRange).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.RequantizationRange.startRequantizationRange = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.RequantizationRange.endRequantizationRange = function(builder) { + 
var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.RequantizationRange.createRequantizationRange = function(builder) { + MNN.RequantizationRange.startRequantizationRange(builder); + return MNN.RequantizationRange.endRequantizationRange(builder); +} + +/** + * @constructor + */ +MNN.Requantize = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Requantize} + */ +MNN.Requantize.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Requantize=} obj + * @returns {MNN.Requantize} + */ +MNN.Requantize.getRootAsRequantize = function(bb, obj) { + return (obj || new MNN.Requantize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Requantize=} obj + * @returns {MNN.Requantize} + */ +MNN.Requantize.getSizePrefixedRootAsRequantize = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Requantize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Requantize.startRequantize = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Requantize.endRequantize = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Requantize.createRequantize = function(builder) { + MNN.Requantize.startRequantize(builder); + return MNN.Requantize.endRequantize(builder); +} + +/** + * @constructor + */ +MNN.TfQuantizedConv2D = function() { + /** + * 
@type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.TfQuantizedConv2D} + */ +MNN.TfQuantizedConv2D.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TfQuantizedConv2D=} obj + * @returns {MNN.TfQuantizedConv2D} + */ +MNN.TfQuantizedConv2D.getRootAsTfQuantizedConv2D = function(bb, obj) { + return (obj || new MNN.TfQuantizedConv2D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TfQuantizedConv2D=} obj + * @returns {MNN.TfQuantizedConv2D} + */ +MNN.TfQuantizedConv2D.getSizePrefixedRootAsTfQuantizedConv2D = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.TfQuantizedConv2D).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.bias = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.biasLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.TfQuantizedConv2D.prototype.biasArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {boolean} + */ +MNN.TfQuantizedConv2D.prototype.biasflag = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {MNN.Convolution2DCommon=} obj + * @returns {MNN.Convolution2DCommon|null} + */ +MNN.TfQuantizedConv2D.prototype.common = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new MNN.Convolution2DCommon).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.weight = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readUint8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.weightLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint8Array} + */ +MNN.TfQuantizedConv2D.prototype.weightArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Uint8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.FusedActivation} + */ +MNN.TfQuantizedConv2D.prototype.activationType = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {MNN.FusedActivation} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.FusedActivation.kTfLiteActNone; +}; + +/** + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.multiplier = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.outMax = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.outMin = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.shift = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.TfQuantizedConv2D.prototype.biasQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.TfQuantizedConv2D.prototype.depthMultiplier = function() { + var offset = this.bb.__offset(this.bb_pos, 24); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.TfQuantizedConv2D.prototype.filterQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 26); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.TfQuantizedConv2D.prototype.inputQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 28); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {MNN.ModeFormat} + */ +MNN.TfQuantizedConv2D.prototype.modelFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 30); + return offset ? 
/** @type {MNN.ModeFormat} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ModeFormat.TENSORFLOW; +}; + +/** + * @param {MNN.QuantizedParam=} obj + * @returns {MNN.QuantizedParam|null} + */ +MNN.TfQuantizedConv2D.prototype.outputQuantizedParam = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 32); + return offset ? (obj || new MNN.QuantizedParam).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.TfQuantizedConv2D.startTfQuantizedConv2D = function(builder) { + builder.startObject(15); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + */ +MNN.TfQuantizedConv2D.addBias = function(builder, biasOffset) { + builder.addFieldOffset(0, biasOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.TfQuantizedConv2D.createBiasVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.TfQuantizedConv2D.startBiasVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} biasflag + */ +MNN.TfQuantizedConv2D.addBiasflag = function(builder, biasflag) { + builder.addFieldInt8(1, +biasflag, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} commonOffset + */ +MNN.TfQuantizedConv2D.addCommon = function(builder, commonOffset) { + builder.addFieldOffset(2, commonOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} weightOffset + */ +MNN.TfQuantizedConv2D.addWeight = function(builder, weightOffset) { + builder.addFieldOffset(3, weightOffset, 0); +}; + +/** + * @param 
{flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.TfQuantizedConv2D.createWeightVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.TfQuantizedConv2D.startWeightVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.FusedActivation} activationType + */ +MNN.TfQuantizedConv2D.addActivationType = function(builder, activationType) { + builder.addFieldInt8(4, activationType, MNN.FusedActivation.kTfLiteActNone); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} multiplier + */ +MNN.TfQuantizedConv2D.addMultiplier = function(builder, multiplier) { + builder.addFieldInt32(5, multiplier, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outMax + */ +MNN.TfQuantizedConv2D.addOutMax = function(builder, outMax) { + builder.addFieldInt32(6, outMax, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} outMin + */ +MNN.TfQuantizedConv2D.addOutMin = function(builder, outMin) { + builder.addFieldInt32(7, outMin, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} shift + */ +MNN.TfQuantizedConv2D.addShift = function(builder, shift) { + builder.addFieldInt32(8, shift, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasQuantizedParamOffset + */ +MNN.TfQuantizedConv2D.addBiasQuantizedParam = function(builder, biasQuantizedParamOffset) { + builder.addFieldOffset(9, biasQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} depthMultiplier + */ +MNN.TfQuantizedConv2D.addDepthMultiplier = function(builder, depthMultiplier) { + builder.addFieldInt32(10, 
depthMultiplier, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} filterQuantizedParamOffset + */ +MNN.TfQuantizedConv2D.addFilterQuantizedParam = function(builder, filterQuantizedParamOffset) { + builder.addFieldOffset(11, filterQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputQuantizedParamOffset + */ +MNN.TfQuantizedConv2D.addInputQuantizedParam = function(builder, inputQuantizedParamOffset) { + builder.addFieldOffset(12, inputQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ModeFormat} modelFormat + */ +MNN.TfQuantizedConv2D.addModelFormat = function(builder, modelFormat) { + builder.addFieldInt8(13, modelFormat, MNN.ModeFormat.TENSORFLOW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputQuantizedParamOffset + */ +MNN.TfQuantizedConv2D.addOutputQuantizedParam = function(builder, outputQuantizedParamOffset) { + builder.addFieldOffset(14, outputQuantizedParamOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.TfQuantizedConv2D.endTfQuantizedConv2D = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} biasOffset + * @param {boolean} biasflag + * @param {flatbuffers.Offset} commonOffset + * @param {flatbuffers.Offset} weightOffset + * @param {MNN.FusedActivation} activationType + * @param {number} multiplier + * @param {number} outMax + * @param {number} outMin + * @param {number} shift + * @param {flatbuffers.Offset} biasQuantizedParamOffset + * @param {number} depthMultiplier + * @param {flatbuffers.Offset} filterQuantizedParamOffset + * @param {flatbuffers.Offset} inputQuantizedParamOffset + * @param {MNN.ModeFormat} modelFormat + * @param {flatbuffers.Offset} outputQuantizedParamOffset + * @returns 
{flatbuffers.Offset} + */ +MNN.TfQuantizedConv2D.createTfQuantizedConv2D = function(builder, biasOffset, biasflag, commonOffset, weightOffset, activationType, multiplier, outMax, outMin, shift, biasQuantizedParamOffset, depthMultiplier, filterQuantizedParamOffset, inputQuantizedParamOffset, modelFormat, outputQuantizedParamOffset) { + MNN.TfQuantizedConv2D.startTfQuantizedConv2D(builder); + MNN.TfQuantizedConv2D.addBias(builder, biasOffset); + MNN.TfQuantizedConv2D.addBiasflag(builder, biasflag); + MNN.TfQuantizedConv2D.addCommon(builder, commonOffset); + MNN.TfQuantizedConv2D.addWeight(builder, weightOffset); + MNN.TfQuantizedConv2D.addActivationType(builder, activationType); + MNN.TfQuantizedConv2D.addMultiplier(builder, multiplier); + MNN.TfQuantizedConv2D.addOutMax(builder, outMax); + MNN.TfQuantizedConv2D.addOutMin(builder, outMin); + MNN.TfQuantizedConv2D.addShift(builder, shift); + MNN.TfQuantizedConv2D.addBiasQuantizedParam(builder, biasQuantizedParamOffset); + MNN.TfQuantizedConv2D.addDepthMultiplier(builder, depthMultiplier); + MNN.TfQuantizedConv2D.addFilterQuantizedParam(builder, filterQuantizedParamOffset); + MNN.TfQuantizedConv2D.addInputQuantizedParam(builder, inputQuantizedParamOffset); + MNN.TfQuantizedConv2D.addModelFormat(builder, modelFormat); + MNN.TfQuantizedConv2D.addOutputQuantizedParam(builder, outputQuantizedParamOffset); + return MNN.TfQuantizedConv2D.endTfQuantizedConv2D(builder); +} + +/** + * @constructor + */ +MNN.GpuBuffer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.GpuBuffer} + */ +MNN.GpuBuffer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuBuffer=} obj + * @returns {MNN.GpuBuffer} + */ +MNN.GpuBuffer.getRootAsGpuBuffer = function(bb, obj) { + return 
(obj || new MNN.GpuBuffer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuBuffer=} obj + * @returns {MNN.GpuBuffer} + */ +MNN.GpuBuffer.getSizePrefixedRootAsGpuBuffer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.GpuBuffer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.ACCESS_TYPE} + */ +MNN.GpuBuffer.prototype.access = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.ACCESS_TYPE} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ACCESS_TYPE.READ_ONLY; +}; + +/** + * @returns {MNN.STORAGE_TYPE} + */ +MNN.GpuBuffer.prototype.storage = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.STORAGE_TYPE} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.STORAGE_TYPE.BUFFER; +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.GpuBuffer.prototype.content = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
(obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.GpuBuffer.startGpuBuffer = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ACCESS_TYPE} access + */ +MNN.GpuBuffer.addAccess = function(builder, access) { + builder.addFieldInt8(0, access, MNN.ACCESS_TYPE.READ_ONLY); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.STORAGE_TYPE} storage + */ +MNN.GpuBuffer.addStorage = function(builder, storage) { + builder.addFieldInt8(1, storage, MNN.STORAGE_TYPE.BUFFER); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} contentOffset + */ +MNN.GpuBuffer.addContent = function(builder, contentOffset) { + builder.addFieldOffset(2, contentOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.GpuBuffer.endGpuBuffer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ACCESS_TYPE} access + * @param {MNN.STORAGE_TYPE} storage + * @param {flatbuffers.Offset} contentOffset + * @returns {flatbuffers.Offset} + */ +MNN.GpuBuffer.createGpuBuffer = function(builder, access, storage, contentOffset) { + MNN.GpuBuffer.startGpuBuffer(builder); + MNN.GpuBuffer.addAccess(builder, access); + MNN.GpuBuffer.addStorage(builder, storage); + MNN.GpuBuffer.addContent(builder, contentOffset); + return MNN.GpuBuffer.endGpuBuffer(builder); +} + +/** + * @constructor + */ +MNN.GpuPipeline = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.GpuPipeline} + */ +MNN.GpuPipeline.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {MNN.GpuPipeline=} obj + * @returns {MNN.GpuPipeline} + */ +MNN.GpuPipeline.getRootAsGpuPipeline = function(bb, obj) { + return (obj || new MNN.GpuPipeline).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuPipeline=} obj + * @returns {MNN.GpuPipeline} + */ +MNN.GpuPipeline.getSizePrefixedRootAsGpuPipeline = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.GpuPipeline).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.GpuPipeline.prototype.localSize = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuPipeline.prototype.localSizeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.GpuPipeline.prototype.localSizeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.GpuPipeline.prototype.key = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.GpuPipeline.prototype.metal = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuPipeline.prototype.metalLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +MNN.GpuPipeline.prototype.metalArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.GpuPipeline.prototype.vulkan = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuPipeline.prototype.vulkanLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +MNN.GpuPipeline.prototype.vulkanArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.GpuPipeline.prototype.openglComputeShader = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.GpuPipeline.prototype.openclKernel = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.GpuPipeline.startGpuPipeline = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} localSizeOffset + */ +MNN.GpuPipeline.addLocalSize = function(builder, localSizeOffset) { + builder.addFieldOffset(0, localSizeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuPipeline.createLocalSizeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuPipeline.startLocalSizeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} keyOffset + */ +MNN.GpuPipeline.addKey = function(builder, keyOffset) { + builder.addFieldOffset(1, keyOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} metalOffset + */ +MNN.GpuPipeline.addMetal = function(builder, metalOffset) { + builder.addFieldOffset(2, metalOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuPipeline.createMetalVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuPipeline.startMetalVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} vulkanOffset + */ 
+MNN.GpuPipeline.addVulkan = function(builder, vulkanOffset) { + builder.addFieldOffset(3, vulkanOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuPipeline.createVulkanVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuPipeline.startVulkanVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} openglComputeShaderOffset + */ +MNN.GpuPipeline.addOpenglComputeShader = function(builder, openglComputeShaderOffset) { + builder.addFieldOffset(4, openglComputeShaderOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} openclKernelOffset + */ +MNN.GpuPipeline.addOpenclKernel = function(builder, openclKernelOffset) { + builder.addFieldOffset(5, openclKernelOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.GpuPipeline.endGpuPipeline = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} localSizeOffset + * @param {flatbuffers.Offset} keyOffset + * @param {flatbuffers.Offset} metalOffset + * @param {flatbuffers.Offset} vulkanOffset + * @param {flatbuffers.Offset} openglComputeShaderOffset + * @param {flatbuffers.Offset} openclKernelOffset + * @returns {flatbuffers.Offset} + */ +MNN.GpuPipeline.createGpuPipeline = function(builder, localSizeOffset, keyOffset, metalOffset, vulkanOffset, openglComputeShaderOffset, openclKernelOffset) { + MNN.GpuPipeline.startGpuPipeline(builder); + MNN.GpuPipeline.addLocalSize(builder, localSizeOffset); + 
MNN.GpuPipeline.addKey(builder, keyOffset); + MNN.GpuPipeline.addMetal(builder, metalOffset); + MNN.GpuPipeline.addVulkan(builder, vulkanOffset); + MNN.GpuPipeline.addOpenglComputeShader(builder, openglComputeShaderOffset); + MNN.GpuPipeline.addOpenclKernel(builder, openclKernelOffset); + return MNN.GpuPipeline.endGpuPipeline(builder); +} + +/** + * @constructor + */ +MNN.GpuStage = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.GpuStage} + */ +MNN.GpuStage.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuStage=} obj + * @returns {MNN.GpuStage} + */ +MNN.GpuStage.getRootAsGpuStage = function(bb, obj) { + return (obj || new MNN.GpuStage).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuStage=} obj + * @returns {MNN.GpuStage} + */ +MNN.GpuStage.getSizePrefixedRootAsGpuStage = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.GpuStage).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.GpuStage.prototype.pipeline = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.GpuStage.prototype.groupSize = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuStage.prototype.groupSizeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.GpuStage.prototype.groupSizeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.GpuStage.prototype.inputIndexes = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuStage.prototype.inputIndexesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.GpuStage.prototype.inputIndexesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.GpuStage.prototype.outputIndexes = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuStage.prototype.outputIndexesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.GpuStage.prototype.outputIndexesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @param {MNN.GpuBuffer=} obj + * @returns {MNN.GpuBuffer} + */ +MNN.GpuStage.prototype.middleBuffer = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new MNN.GpuBuffer).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.GpuStage.prototype.middleBufferLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {MNN.GpuBuffer=} obj + * @returns {MNN.GpuBuffer} + */ +MNN.GpuStage.prototype.constBuffer = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? (obj || new MNN.GpuBuffer).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.GpuStage.prototype.constBufferLength = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuStage.prototype.globalSizeIndex = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.GpuStage.prototype.globalSizeDivide = function(index) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.GpuStage.prototype.globalSizeDivideLength = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.GpuStage.prototype.globalSizeDivideArray = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {boolean} + */ +MNN.GpuStage.prototype.requireSize = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.GpuStage.startGpuStage = function(builder) { + builder.startObject(9); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} pipelineOffset + */ +MNN.GpuStage.addPipeline = function(builder, pipelineOffset) { + builder.addFieldOffset(0, pipelineOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} groupSizeOffset + */ +MNN.GpuStage.addGroupSize = function(builder, groupSizeOffset) { + builder.addFieldOffset(1, groupSizeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.createGroupSizeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuStage.startGroupSizeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param 
{flatbuffers.Offset} inputIndexesOffset + */ +MNN.GpuStage.addInputIndexes = function(builder, inputIndexesOffset) { + builder.addFieldOffset(2, inputIndexesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.createInputIndexesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuStage.startInputIndexesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputIndexesOffset + */ +MNN.GpuStage.addOutputIndexes = function(builder, outputIndexesOffset) { + builder.addFieldOffset(3, outputIndexesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.createOutputIndexesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuStage.startOutputIndexesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} middleBufferOffset + */ +MNN.GpuStage.addMiddleBuffer = function(builder, middleBufferOffset) { + builder.addFieldOffset(4, middleBufferOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.createMiddleBufferVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + 
builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuStage.startMiddleBufferVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} constBufferOffset + */ +MNN.GpuStage.addConstBuffer = function(builder, constBufferOffset) { + builder.addFieldOffset(5, constBufferOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.createConstBufferVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuStage.startConstBufferVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} globalSizeIndex + */ +MNN.GpuStage.addGlobalSizeIndex = function(builder, globalSizeIndex) { + builder.addFieldInt32(6, globalSizeIndex, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} globalSizeDivideOffset + */ +MNN.GpuStage.addGlobalSizeDivide = function(builder, globalSizeDivideOffset) { + builder.addFieldOffset(7, globalSizeDivideOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.createGlobalSizeDivideVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuStage.startGlobalSizeDivideVector = function(builder, numElems) { + builder.startVector(4, 
numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} requireSize + */ +MNN.GpuStage.addRequireSize = function(builder, requireSize) { + builder.addFieldInt8(8, +requireSize, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.endGpuStage = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} pipelineOffset + * @param {flatbuffers.Offset} groupSizeOffset + * @param {flatbuffers.Offset} inputIndexesOffset + * @param {flatbuffers.Offset} outputIndexesOffset + * @param {flatbuffers.Offset} middleBufferOffset + * @param {flatbuffers.Offset} constBufferOffset + * @param {number} globalSizeIndex + * @param {flatbuffers.Offset} globalSizeDivideOffset + * @param {boolean} requireSize + * @returns {flatbuffers.Offset} + */ +MNN.GpuStage.createGpuStage = function(builder, pipelineOffset, groupSizeOffset, inputIndexesOffset, outputIndexesOffset, middleBufferOffset, constBufferOffset, globalSizeIndex, globalSizeDivideOffset, requireSize) { + MNN.GpuStage.startGpuStage(builder); + MNN.GpuStage.addPipeline(builder, pipelineOffset); + MNN.GpuStage.addGroupSize(builder, groupSizeOffset); + MNN.GpuStage.addInputIndexes(builder, inputIndexesOffset); + MNN.GpuStage.addOutputIndexes(builder, outputIndexesOffset); + MNN.GpuStage.addMiddleBuffer(builder, middleBufferOffset); + MNN.GpuStage.addConstBuffer(builder, constBufferOffset); + MNN.GpuStage.addGlobalSizeIndex(builder, globalSizeIndex); + MNN.GpuStage.addGlobalSizeDivide(builder, globalSizeDivideOffset); + MNN.GpuStage.addRequireSize(builder, requireSize); + return MNN.GpuStage.endGpuStage(builder); +} + +/** + * @constructor + */ +MNN.GpuFunction = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param 
{flatbuffers.ByteBuffer} bb + * @returns {MNN.GpuFunction} + */ +MNN.GpuFunction.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuFunction=} obj + * @returns {MNN.GpuFunction} + */ +MNN.GpuFunction.getRootAsGpuFunction = function(bb, obj) { + return (obj || new MNN.GpuFunction).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuFunction=} obj + * @returns {MNN.GpuFunction} + */ +MNN.GpuFunction.getSizePrefixedRootAsGpuFunction = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.GpuFunction).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @param {MNN.GpuStage=} obj + * @returns {MNN.GpuStage} + */ +MNN.GpuFunction.prototype.stags = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new MNN.GpuStage).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.GpuFunction.prototype.stagsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.GpuFunction.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.GpuFunction.startGpuFunction = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} stagsOffset + */ +MNN.GpuFunction.addStags = function(builder, stagsOffset) { + builder.addFieldOffset(0, stagsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuFunction.createStagsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuFunction.startStagsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +MNN.GpuFunction.addName = function(builder, nameOffset) { + builder.addFieldOffset(1, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.GpuFunction.endGpuFunction = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} stagsOffset + * @param {flatbuffers.Offset} nameOffset + * @returns {flatbuffers.Offset} + */ +MNN.GpuFunction.createGpuFunction = function(builder, stagsOffset, nameOffset) { + MNN.GpuFunction.startGpuFunction(builder); + MNN.GpuFunction.addStags(builder, stagsOffset); + MNN.GpuFunction.addName(builder, nameOffset); + return MNN.GpuFunction.endGpuFunction(builder); +} + +/** + * @constructor + */ +MNN.GpuLibrary = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i 
+ * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.GpuLibrary} + */ +MNN.GpuLibrary.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuLibrary=} obj + * @returns {MNN.GpuLibrary} + */ +MNN.GpuLibrary.getRootAsGpuLibrary = function(bb, obj) { + return (obj || new MNN.GpuLibrary).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.GpuLibrary=} obj + * @returns {MNN.GpuLibrary} + */ +MNN.GpuLibrary.getSizePrefixedRootAsGpuLibrary = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.GpuLibrary).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @param {MNN.GpuFunction=} obj + * @returns {MNN.GpuFunction} + */ +MNN.GpuLibrary.prototype.functions = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new MNN.GpuFunction).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.GpuLibrary.prototype.functionsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {MNN.GpuPipeline=} obj + * @returns {MNN.GpuPipeline} + */ +MNN.GpuLibrary.prototype.pipeline = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new MNN.GpuPipeline).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.GpuLibrary.prototype.pipelineLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.GpuLibrary.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.GpuLibrary.startGpuLibrary = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} functionsOffset + */ +MNN.GpuLibrary.addFunctions = function(builder, functionsOffset) { + builder.addFieldOffset(0, functionsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuLibrary.createFunctionsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuLibrary.startFunctionsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} pipelineOffset + */ +MNN.GpuLibrary.addPipeline = function(builder, pipelineOffset) { + builder.addFieldOffset(1, pipelineOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.GpuLibrary.createPipelineVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.GpuLibrary.startPipelineVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * 
@param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +MNN.GpuLibrary.addName = function(builder, nameOffset) { + builder.addFieldOffset(2, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.GpuLibrary.endGpuLibrary = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} functionsOffset + * @param {flatbuffers.Offset} pipelineOffset + * @param {flatbuffers.Offset} nameOffset + * @returns {flatbuffers.Offset} + */ +MNN.GpuLibrary.createGpuLibrary = function(builder, functionsOffset, pipelineOffset, nameOffset) { + MNN.GpuLibrary.startGpuLibrary(builder); + MNN.GpuLibrary.addFunctions(builder, functionsOffset); + MNN.GpuLibrary.addPipeline(builder, pipelineOffset); + MNN.GpuLibrary.addName(builder, nameOffset); + return MNN.GpuLibrary.endGpuLibrary(builder); +} + +/** + * @constructor + */ +MNN.TensorConvertInfo = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.TensorConvertInfo} + */ +MNN.TensorConvertInfo.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TensorConvertInfo=} obj + * @returns {MNN.TensorConvertInfo} + */ +MNN.TensorConvertInfo.getRootAsTensorConvertInfo = function(bb, obj) { + return (obj || new MNN.TensorConvertInfo).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TensorConvertInfo=} obj + * @returns {MNN.TensorConvertInfo} + */ +MNN.TensorConvertInfo.getSizePrefixedRootAsTensorConvertInfo = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new 
MNN.TensorConvertInfo).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {MNN.MNN_DATA_FORMAT} + */ +MNN.TensorConvertInfo.prototype.source = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {MNN.MNN_DATA_FORMAT} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.MNN_DATA_FORMAT.NCHW; +}; + +/** + * @returns {MNN.MNN_DATA_FORMAT} + */ +MNN.TensorConvertInfo.prototype.dest = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.MNN_DATA_FORMAT} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.MNN_DATA_FORMAT.NCHW; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.TensorConvertInfo.startTensorConvertInfo = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.MNN_DATA_FORMAT} source + */ +MNN.TensorConvertInfo.addSource = function(builder, source) { + builder.addFieldInt8(0, source, MNN.MNN_DATA_FORMAT.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.MNN_DATA_FORMAT} dest + */ +MNN.TensorConvertInfo.addDest = function(builder, dest) { + builder.addFieldInt8(1, dest, MNN.MNN_DATA_FORMAT.NCHW); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.TensorConvertInfo.endTensorConvertInfo = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.MNN_DATA_FORMAT} source + * @param {MNN.MNN_DATA_FORMAT} dest + * @returns {flatbuffers.Offset} + */ +MNN.TensorConvertInfo.createTensorConvertInfo = function(builder, source, dest) { + MNN.TensorConvertInfo.startTensorConvertInfo(builder); + MNN.TensorConvertInfo.addSource(builder, source); + MNN.TensorConvertInfo.addDest(builder, dest); + return MNN.TensorConvertInfo.endTensorConvertInfo(builder); +} + +/** + * @constructor + */ +MNN.Plugin = function() { + /** + * @type {flatbuffers.ByteBuffer} + 
*/ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Plugin} + */ +MNN.Plugin.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Plugin=} obj + * @returns {MNN.Plugin} + */ +MNN.Plugin.getRootAsPlugin = function(bb, obj) { + return (obj || new MNN.Plugin).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Plugin=} obj + * @returns {MNN.Plugin} + */ +MNN.Plugin.getSizePrefixedRootAsPlugin = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Plugin).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.Plugin.prototype.type = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @param {MNN.Attribute=} obj + * @returns {MNN.Attribute} + */ +MNN.Plugin.prototype.attr = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new MNN.Attribute).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.Plugin.prototype.attrLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Plugin.startPlugin = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} typeOffset + */ +MNN.Plugin.addType = function(builder, typeOffset) { + builder.addFieldOffset(0, typeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} attrOffset + */ +MNN.Plugin.addAttr = function(builder, attrOffset) { + builder.addFieldOffset(1, attrOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Plugin.createAttrVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Plugin.startAttrVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Plugin.endPlugin = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} typeOffset + * @param {flatbuffers.Offset} attrOffset + * @returns {flatbuffers.Offset} + */ +MNN.Plugin.createPlugin = function(builder, typeOffset, attrOffset) { + MNN.Plugin.startPlugin(builder); + MNN.Plugin.addType(builder, typeOffset); + MNN.Plugin.addAttr(builder, attrOffset); + return MNN.Plugin.endPlugin(builder); +} + +/** + * @constructor + */ +MNN.Extra = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Extra} + */ +MNN.Extra.prototype.__init = function(i, 
bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Extra=} obj + * @returns {MNN.Extra} + */ +MNN.Extra.getRootAsExtra = function(bb, obj) { + return (obj || new MNN.Extra).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Extra=} obj + * @returns {MNN.Extra} + */ +MNN.Extra.getSizePrefixedRootAsExtra = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Extra).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.Extra.prototype.type = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.Extra.prototype.engine = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Extra.prototype.info = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +MNN.Extra.prototype.infoLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +MNN.Extra.prototype.infoArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @param {MNN.Attribute=} obj + * @returns {MNN.Attribute} + */ +MNN.Extra.prototype.attr = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new MNN.Attribute).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.Extra.prototype.attrLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Extra.startExtra = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} typeOffset + */ +MNN.Extra.addType = function(builder, typeOffset) { + builder.addFieldOffset(0, typeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} engineOffset + */ +MNN.Extra.addEngine = function(builder, engineOffset) { + builder.addFieldOffset(1, engineOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} infoOffset + */ +MNN.Extra.addInfo = function(builder, infoOffset) { + builder.addFieldOffset(2, infoOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Extra.createInfoVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Extra.startInfoVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * 
@param {flatbuffers.Offset} attrOffset + */ +MNN.Extra.addAttr = function(builder, attrOffset) { + builder.addFieldOffset(3, attrOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Extra.createAttrVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Extra.startAttrVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Extra.endExtra = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} typeOffset + * @param {flatbuffers.Offset} engineOffset + * @param {flatbuffers.Offset} infoOffset + * @param {flatbuffers.Offset} attrOffset + * @returns {flatbuffers.Offset} + */ +MNN.Extra.createExtra = function(builder, typeOffset, engineOffset, infoOffset, attrOffset) { + MNN.Extra.startExtra(builder); + MNN.Extra.addType(builder, typeOffset); + MNN.Extra.addEngine(builder, engineOffset); + MNN.Extra.addInfo(builder, infoOffset); + MNN.Extra.addAttr(builder, attrOffset); + return MNN.Extra.endExtra(builder); +} + +/** + * @constructor + */ +MNN.Op = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Op} + */ +MNN.Op.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Op=} obj + * @returns {MNN.Op} + */ +MNN.Op.getRootAsOp = function(bb, obj) { + return (obj || new 
MNN.Op).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Op=} obj + * @returns {MNN.Op} + */ +MNN.Op.getSizePrefixedRootAsOp = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Op).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Op.prototype.inputIndexes = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Op.prototype.inputIndexesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Op.prototype.inputIndexesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.OpParameter} + */ +MNN.Op.prototype.mainType = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {MNN.OpParameter} */ (this.bb.readUint8(this.bb_pos + offset)) : MNN.OpParameter.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +MNN.Op.prototype.main = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.Op.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +MNN.Op.prototype.outputIndexes = function(index) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +MNN.Op.prototype.outputIndexesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +MNN.Op.prototype.outputIndexesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {MNN.OpType} + */ +MNN.Op.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? /** @type {MNN.OpType} */ (this.bb.readInt32(this.bb_pos + offset)) : MNN.OpType.AbsVal; +}; + +/** + * @returns {MNN.MNN_DATA_FORMAT} + */ +MNN.Op.prototype.defaultDimentionFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
/** @type {MNN.MNN_DATA_FORMAT} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.MNN_DATA_FORMAT.NHWC; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Op.startOp = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputIndexesOffset + */ +MNN.Op.addInputIndexes = function(builder, inputIndexesOffset) { + builder.addFieldOffset(0, inputIndexesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Op.createInputIndexesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Op.startInputIndexesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.OpParameter} mainType + */ +MNN.Op.addMainType = function(builder, mainType) { + builder.addFieldInt8(1, mainType, MNN.OpParameter.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} mainOffset + */ +MNN.Op.addMain = function(builder, mainOffset) { + builder.addFieldOffset(2, mainOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +MNN.Op.addName = function(builder, nameOffset) { + builder.addFieldOffset(3, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputIndexesOffset + */ +MNN.Op.addOutputIndexes = function(builder, outputIndexesOffset) { + builder.addFieldOffset(4, outputIndexesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Op.createOutputIndexesVector = function(builder, data) { + builder.startVector(4, 
data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Op.startOutputIndexesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.OpType} type + */ +MNN.Op.addType = function(builder, type) { + builder.addFieldInt32(5, type, MNN.OpType.AbsVal); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.MNN_DATA_FORMAT} defaultDimentionFormat + */ +MNN.Op.addDefaultDimentionFormat = function(builder, defaultDimentionFormat) { + builder.addFieldInt8(6, defaultDimentionFormat, MNN.MNN_DATA_FORMAT.NHWC); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Op.endOp = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputIndexesOffset + * @param {MNN.OpParameter} mainType + * @param {flatbuffers.Offset} mainOffset + * @param {flatbuffers.Offset} nameOffset + * @param {flatbuffers.Offset} outputIndexesOffset + * @param {MNN.OpType} type + * @param {MNN.MNN_DATA_FORMAT} defaultDimentionFormat + * @returns {flatbuffers.Offset} + */ +MNN.Op.createOp = function(builder, inputIndexesOffset, mainType, mainOffset, nameOffset, outputIndexesOffset, type, defaultDimentionFormat) { + MNN.Op.startOp(builder); + MNN.Op.addInputIndexes(builder, inputIndexesOffset); + MNN.Op.addMainType(builder, mainType); + MNN.Op.addMain(builder, mainOffset); + MNN.Op.addName(builder, nameOffset); + MNN.Op.addOutputIndexes(builder, outputIndexesOffset); + MNN.Op.addType(builder, type); + MNN.Op.addDefaultDimentionFormat(builder, defaultDimentionFormat); + return MNN.Op.endOp(builder); +} + +/** + * @constructor + */ +MNN.TensorDescribe = function() { + /** + * @type {flatbuffers.ByteBuffer} + 
*/ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.TensorDescribe} + */ +MNN.TensorDescribe.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TensorDescribe=} obj + * @returns {MNN.TensorDescribe} + */ +MNN.TensorDescribe.getRootAsTensorDescribe = function(bb, obj) { + return (obj || new MNN.TensorDescribe).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.TensorDescribe=} obj + * @returns {MNN.TensorDescribe} + */ +MNN.TensorDescribe.getSizePrefixedRootAsTensorDescribe = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.TensorDescribe).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {MNN.Blob=} obj + * @returns {MNN.Blob|null} + */ +MNN.TensorDescribe.prototype.blob = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new MNN.Blob).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.TensorDescribe.prototype.index = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.TensorDescribe.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.TensorDescribe.startTensorDescribe = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blobOffset + */ +MNN.TensorDescribe.addBlob = function(builder, blobOffset) { + builder.addFieldOffset(0, blobOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} index + */ +MNN.TensorDescribe.addIndex = function(builder, index) { + builder.addFieldInt32(1, index, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +MNN.TensorDescribe.addName = function(builder, nameOffset) { + builder.addFieldOffset(2, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.TensorDescribe.endTensorDescribe = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blobOffset + * @param {number} index + * @param {flatbuffers.Offset} nameOffset + * @returns {flatbuffers.Offset} + */ +MNN.TensorDescribe.createTensorDescribe = function(builder, blobOffset, index, nameOffset) { + MNN.TensorDescribe.startTensorDescribe(builder); + MNN.TensorDescribe.addBlob(builder, blobOffset); + MNN.TensorDescribe.addIndex(builder, index); + MNN.TensorDescribe.addName(builder, nameOffset); + return MNN.TensorDescribe.endTensorDescribe(builder); +} + +/** + * @constructor + */ +MNN.Net = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {MNN.Net} + */ +MNN.Net.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Net=} 
obj + * @returns {MNN.Net} + */ +MNN.Net.getRootAsNet = function(bb, obj) { + return (obj || new MNN.Net).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {MNN.Net=} obj + * @returns {MNN.Net} + */ +MNN.Net.getSizePrefixedRootAsNet = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new MNN.Net).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +MNN.Net.prototype.bizCode = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @param {MNN.TensorDescribe=} obj + * @returns {MNN.TensorDescribe} + */ +MNN.Net.prototype.extraTensorDescribe = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new MNN.TensorDescribe).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.Net.prototype.extraTensorDescribeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {MNN.GpuLibrary=} obj + * @returns {MNN.GpuLibrary|null} + */ +MNN.Net.prototype.gpulibrary = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new MNN.GpuLibrary).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @param {MNN.Op=} obj + * @returns {MNN.Op} + */ +MNN.Net.prototype.oplists = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
(obj || new MNN.Op).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +MNN.Net.prototype.oplistsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array} + */ +MNN.Net.prototype.outputName = function(index, optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__string(this.bb.__vector(this.bb_pos + offset) + index * 4, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +MNN.Net.prototype.outputNameLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.ForwardType} + */ +MNN.Net.prototype.preferForwardType = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? /** @type {MNN.ForwardType} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.ForwardType.CPU; +}; + +/** + * @returns {MNN.NetSource} + */ +MNN.Net.prototype.sourceType = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? /** @type {MNN.NetSource} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.NetSource.CAFFE; +}; + +/** + * @param {number} index + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array} + */ +MNN.Net.prototype.tensorName = function(index, optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.__string(this.bb.__vector(this.bb_pos + offset) + index * 4, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +MNN.Net.prototype.tensorNameLength = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +MNN.Net.prototype.tensorNumber = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {MNN.Usage} + */ +MNN.Net.prototype.usage = function() { + var offset = this.bb.__offset(this.bb_pos, 22); + return offset ? /** @type {MNN.Usage} */ (this.bb.readInt8(this.bb_pos + offset)) : MNN.Usage.INFERENCE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +MNN.Net.startNet = function(builder) { + builder.startObject(10); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bizCodeOffset + */ +MNN.Net.addBizCode = function(builder, bizCodeOffset) { + builder.addFieldOffset(0, bizCodeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} extraTensorDescribeOffset + */ +MNN.Net.addExtraTensorDescribe = function(builder, extraTensorDescribeOffset) { + builder.addFieldOffset(1, extraTensorDescribeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Net.createExtraTensorDescribeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Net.startExtraTensorDescribeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} gpulibraryOffset + */ +MNN.Net.addGpulibrary = function(builder, gpulibraryOffset) { + builder.addFieldOffset(2, gpulibraryOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} oplistsOffset + */ +MNN.Net.addOplists = function(builder, oplistsOffset) { + 
builder.addFieldOffset(3, oplistsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Net.createOplistsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Net.startOplistsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputNameOffset + */ +MNN.Net.addOutputName = function(builder, outputNameOffset) { + builder.addFieldOffset(4, outputNameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Net.createOutputNameVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Net.startOutputNameVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.ForwardType} preferForwardType + */ +MNN.Net.addPreferForwardType = function(builder, preferForwardType) { + builder.addFieldInt8(5, preferForwardType, MNN.ForwardType.CPU); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.NetSource} sourceType + */ +MNN.Net.addSourceType = function(builder, sourceType) { + builder.addFieldInt8(6, sourceType, MNN.NetSource.CAFFE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} tensorNameOffset + */ +MNN.Net.addTensorName = function(builder, tensorNameOffset) { + builder.addFieldOffset(7, tensorNameOffset, 0); +}; + +/** + * @param 
{flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +MNN.Net.createTensorNameVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +MNN.Net.startTensorNameVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} tensorNumber + */ +MNN.Net.addTensorNumber = function(builder, tensorNumber) { + builder.addFieldInt32(8, tensorNumber, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {MNN.Usage} usage + */ +MNN.Net.addUsage = function(builder, usage) { + builder.addFieldInt8(9, usage, MNN.Usage.INFERENCE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +MNN.Net.endNet = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ +MNN.Net.finishNetBuffer = function(builder, offset) { + builder.finish(offset); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ +MNN.Net.finishSizePrefixedNetBuffer = function(builder, offset) { + builder.finish(offset, undefined, true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} bizCodeOffset + * @param {flatbuffers.Offset} extraTensorDescribeOffset + * @param {flatbuffers.Offset} gpulibraryOffset + * @param {flatbuffers.Offset} oplistsOffset + * @param {flatbuffers.Offset} outputNameOffset + * @param {MNN.ForwardType} preferForwardType + * @param {MNN.NetSource} sourceType + * @param {flatbuffers.Offset} tensorNameOffset + * @param {number} tensorNumber + * @param {MNN.Usage} usage + * @returns {flatbuffers.Offset} + */ +MNN.Net.createNet = 
function(builder, bizCodeOffset, extraTensorDescribeOffset, gpulibraryOffset, oplistsOffset, outputNameOffset, preferForwardType, sourceType, tensorNameOffset, tensorNumber, usage) { + MNN.Net.startNet(builder); + MNN.Net.addBizCode(builder, bizCodeOffset); + MNN.Net.addExtraTensorDescribe(builder, extraTensorDescribeOffset); + MNN.Net.addGpulibrary(builder, gpulibraryOffset); + MNN.Net.addOplists(builder, oplistsOffset); + MNN.Net.addOutputName(builder, outputNameOffset); + MNN.Net.addPreferForwardType(builder, preferForwardType); + MNN.Net.addSourceType(builder, sourceType); + MNN.Net.addTensorName(builder, tensorNameOffset); + MNN.Net.addTensorNumber(builder, tensorNumber); + MNN.Net.addUsage(builder, usage); + return MNN.Net.endNet(builder); +} + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports = { mnn_schema: MNN }; +} diff --git a/frontend/packages/core/public/netron/mnn.js b/frontend/packages/core/public/netron/mnn.js new file mode 100644 index 00000000..c018d97e --- /dev/null +++ b/frontend/packages/core/public/netron/mnn.js @@ -0,0 +1,627 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var mnn = mnn || {}; +var base = base || require('./base'); +var flatbuffers = flatbuffers || require('flatbuffers').flatbuffers; + +mnn.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (extension == 'mnn') { + return true; + } + return false; + } + + open(context, host) { + return host.require('./mnn-schema').then((schema) => { + return mnn.Metadata.open(host).then((metadata) => { + const identifier = context.identifier; + try { + mnn.schema = schema.mnn_schema; + const byteBuffer = new flatbuffers.ByteBuffer(context.buffer); + const net = mnn.schema.Net.getRootAsNet(byteBuffer); + return new mnn.Model(metadata, net); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? 
error.message : error.toString(); + throw new mnn.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } +}; + +mnn.Model = class { + + constructor(metadata, net) { + switch (net.sourceType()) { + case mnn.schema.NetSource.CAFFE: this._source = 'Caffe'; break; + case mnn.schema.NetSource.TENSORFLOW: this._source = 'TensorFlow'; break; + case mnn.schema.NetSource.TFLITE: this._source = 'TensorFlow Lite'; break; + case mnn.schema.NetSource.ONNX: this._source = 'ONNX'; break; + } + this._graphs = [ new mnn.Graph(metadata, net) ]; + } + + get format() { + return 'MNN v2'; + } + + get source() { + return this._source || ''; + } + + get graphs() { + return this._graphs; + } +}; + +mnn.Graph = class { + + constructor(metadata, net) { + this._nodes = []; + this._inputs = []; + this._outputs = []; + let inputSet = new Set(); + for (let i = 0; i < net.oplistsLength(); i++) { + const op = net.oplists(i); + if (mnn.schema.OpTypeName[op.type()] === 'Input') { + let args = []; + for (let j = 0; j < op.outputIndexesLength(); j++) { + const index = op.outputIndexes(j); + const name = net.tensorName(index); + const extraTensorDescribe = net.extraTensorDescribe(index); + const blob = extraTensorDescribe ? extraTensorDescribe.blob() : null; + const type = blob ? mnn.Graph._blobTensorType(blob) : null; + args.push(new mnn.Argument(name, type, null)); + } + this._inputs.push(new mnn.Parameter(op.name(), true, args)); + } + else { + this._nodes.push(new mnn.Node(metadata, op, net)); + } + for (let k = 0; k < op.inputIndexesLength(); k++) { + const index = op.inputIndexes(k); + inputSet.add(index); + } + } + + for (let i = 0; i < net.tensorNameLength(); i++) { + if (!inputSet.has(i)) { + const name = net.tensorName(i); + const extraTensorDescribe = net.extraTensorDescribe(i); + const blob = extraTensorDescribe ? extraTensorDescribe.blob() : null; + const type = blob ? 
mnn.Graph._blobTensorType(blob) : null; + this._outputs.push(new mnn.Parameter(name, true, [ + new mnn.Argument(name, type, null) + ])); + } + } + } + + get name() { + return ''; + } + + get groups() { + return false; + } + + get nodes() { + return this._nodes; + } + + get outputs() { + return this._outputs; + } + + get inputs() { + return this._inputs; + } + + static _blobTensorType(blob) { + mnn.Graph._blobTensorTypeMap = mnn.Graph._blobTensorTypeMap || new Map([ + [ mnn.schema.DataType.DT_INVALID, '?' ], + [ mnn.schema.DataType.DT_FLOAT, 'float32' ], + [ mnn.schema.DataType.DT_DOUBLE, 'float64' ], + [ mnn.schema.DataType.DT_INT32, 'int32' ], + [ mnn.schema.DataType.DT_UINT8, 'uint8' ], + [ mnn.schema.DataType.DT_INT16, 'int16' ], + [ mnn.schema.DataType.DT_INT8, 'int8' ], + [ mnn.schema.DataType.DT_STRING, 'string' ], + [ mnn.schema.DataType.DT_COMPLEX64, 'complex64' ], + [ mnn.schema.DataType.DT_INT64, 'int64' ], + [ mnn.schema.DataType.DT_BOOL, 'boolean' ], + [ mnn.schema.DataType.DT_QINT8, 'qint8' ], + [ mnn.schema.DataType.DT_QUINT8, 'quint8' ], + [ mnn.schema.DataType.DT_QINT32, 'qint32' ], + [ mnn.schema.DataType.DT_BFLOAT16, 'bfloat16' ], + [ mnn.schema.DataType.DT_QINT16, 'qint16' ], + [ mnn.schema.DataType.DT_QUINT16, 'quint16' ], + [ mnn.schema.DataType.DT_UINT16, 'uint16' ], + [ mnn.schema.DataType.DT_COMPLEX128, 'complex128' ], + [ mnn.schema.DataType.DT_HALF, 'float16' ], + [ mnn.schema.DataType.DT_RESOURCE, 'resource' ], + [ mnn.schema.DataType.DT_VARIANT, 'variant' ], + ]); + const dataType = mnn.Graph._blobTensorTypeMap.has(blob.dataType()) ? 
mnn.Graph._blobTensorTypeMap.get(blob.dataType()) : '?'; + const dimensions = blob.dimsArray() || []; + return new mnn.TensorType(dataType, new mnn.TensorShape(dimensions)); + } +}; + +mnn.Node = class { + + constructor(metadata, op, net) { + this._metadata = metadata; + this._type = mnn.schema.OpTypeName[op.type()] || '(' + op.type().toString() + ')'; + this._name = op.name() || ''; + this._attributes = []; + this._inputs = []; + this._outputs = []; + this._chains = []; + let inputs = []; + for (let i = 0; i < op.inputIndexesLength(); i++) { + const index = op.inputIndexes(i); + const id = net.tensorName(index); + inputs.push(new mnn.Argument(id, null, null)); + } + this._inputs.push(new mnn.Parameter('input', true, inputs)); + let outputs = []; + for (let i = 0; i < op.outputIndexesLength(); i++) { + const index = op.outputIndexes(i); + const name = net.tensorName(index); + outputs.push(new mnn.Argument(name, null, null)); + } + this._outputs.push(new mnn.Parameter('output', true, outputs)); + + const parameterType = mnn.schema.OpParameterName[op.mainType()]; + const parameterConstructor = mnn.schema[parameterType]; + if (typeof parameterConstructor === 'function') { + const parameter = op.main(Reflect.construct(parameterConstructor, [])); + if (parameter !== null && parameter instanceof mnn.schema.Blob) { + const type = mnn.Graph._blobTensorType(parameter); + let data = null; + switch (type.dataType) { + case 'int32': data = parameter.int32sArray(); break; + case 'float32': data = parameter.float32sArray(); break; + } + this._inputs.push(new mnn.Parameter('value', true, [ + new mnn.Argument('', null, new mnn.Tensor('Blob', type, data)) + ])); + } + else { + // weights & bias + let invisibleAttributes = null; + switch (parameterType) { + case 'Convolution2D': { + const common = parameter.common(); + const outputCount = common.outputCount(); + const inputCount = common.inputCount(); + const kernelX = common.kernelX(); + const kernelY = common.kernelY(); + 
this._buildTensor('float32', 'weight', [ outputCount, inputCount, kernelX, kernelY ], parameter.weightArray()); + this._buildTensor('float32', 'bias', [ outputCount ], parameter.biasArray()); + invisibleAttributes = { "weight": true, "bias": true }; + break; + } + case 'InnerProduct': { + const outputCount = parameter.outputCount(); + const inputCount = parameter.weightSize() / outputCount; + this._buildTensor('float32', 'weight', [ outputCount, inputCount ], parameter.weightArray()); + this._buildTensor('float32', 'bias', [ outputCount ], parameter.biasArray()); + invisibleAttributes = { 'weight': true, 'bias': true }; + break; + } + case 'Scale': { + const scaleDataCount = parameter.channels(); + this._buildTensor('float32', 'scale', [ scaleDataCount ], parameter.scaleDataArray()); + this._buildTensor('float32', 'bias', [ scaleDataCount ], parameter.biasDataArray()); + invisibleAttributes = { 'scaleData': true, 'biasData': true }; + break; + } + case 'BatchNorm': { + const channels = parameter.channels(); + this._buildTensor('float32', 'mean', [ channels ], parameter.meanDataArray()); + this._buildTensor('float32', 'slope', [ channels ], parameter.slopeDataArray()); + this._buildTensor('float32', 'variance', [ channels ], parameter.varDataArray()); + this._buildTensor('float32', 'bias', [ channels ], parameter.biasDataArray()); + invisibleAttributes = { 'slopeData': true, 'meanData': true, 'varData': true, 'biasData': true }; + break; + } + case 'PRelu': { + this._buildTensor('float32', 'slope', [ parameter.slopeCount() ], parameter.slopeArray()); + invisibleAttributes = { 'slope': true }; + break; + } + case 'Normalize': { + this._buildTensor('float32', 'scale', [ parameter.scaleLength() ], parameter.scaleArray()); + invisibleAttributes = { 'scale': true }; + break; + } + } + this._recursivelyBuildAttributes(metadata, net, parameter, parameterType, invisibleAttributes, this._attributes); + } + } + } + + _buildTensor(dataType, name, dimensions, value) { + 
this._inputs.push(new mnn.Parameter(name, true, [ + new mnn.Argument('', null, new mnn.Tensor('Weight', new mnn.TensorType(dataType, new mnn.TensorShape(dimensions)), value)) + ])); + } + + _recursivelyBuildAttributes(metadata, net, parameter, paramterType, invisibleAttributes, attributeHolders) { + if (!parameter) return; + + let attributeNames = []; + let attributeNamesMap = {}; + for (const attributeName of Object.keys(Object.getPrototypeOf(parameter))) { + if (attributeName != '__init') { + attributeNames.push(attributeName); + } + attributeNamesMap[attributeName] = true; + } + + let attributeArrayNamesMap = {}; + for (const attributeName of Object.keys(attributeNamesMap)) { + if (attributeNamesMap[attributeName + 'Length']) { // some bugs without array + attributeArrayNamesMap[attributeName] = true; + attributeNames = attributeNames.filter((item) => item != (attributeName + 'Array') && item != (attributeName + 'Length')); + } + } + + for (const attributeName of attributeNames) { + + if (invisibleAttributes && invisibleAttributes[attributeName]) { + continue; + } + + if (parameter[attributeName] && typeof parameter[attributeName] == 'function') { + let value = null; + if (attributeArrayNamesMap[attributeName]) { + let array = []; + const length = parameter[attributeName + 'Length'](); + for (let i = 0; i < length; i++) { + array.push(parameter[attributeName](i)); + } + value = array; + } + else { + value = parameter[attributeName](); + if (typeof value === 'object') { + let name = null; + for (const key of Object.getOwnPropertyNames(mnn.schema)) { + const type = mnn.schema[key]; + if (typeof type === "function" && value instanceof type) { + name = key; + break; + } + } + this._recursivelyBuildAttributes(metadata, net, value, name, null, attributeHolders); + value = null; + } + } + + if (value != null) { + const schema = metadata.attribute(this.type, attributeName); + attributeHolders.push(new mnn.Attribute(schema, attributeName, value)); + } + } + } + } + + get 
type() { + return this._type; + } + + get name() { + return this._name; + } + + get domain() { + return null; + } + + get metadata() { + return this._metadata.type(this.type); + } + + get group() { + return null; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chains; + } + + get attributes() { + return this._attributes; + } +}; + +mnn.Attribute = class { + + constructor(schema, name, value, visible) { + this._type = null; + this._value = value; + this._name = name; + this._visible = visible; + if (schema) { + if (schema.type) { + this._type = schema.type; + const type = mnn.schema[this._type + 'Name']; + if (type) { + this._value = type[this._value]; + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +mnn.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +mnn.Argument = class { + + constructor(name, type, initializer) { + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +mnn.Tensor = class { + + constructor(kind, type, data) { + this._kind = kind; + this._type = type; + this._data = data; + } + + get kind() { + return this._kind; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + let context = this._context(); + if (context.state) { + return null; + } + 
context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + let context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + let context = {}; + context.state = null; + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + context.index = 0; + context.count = 0; + context.dataType = this._type.dataType; + context.dimensions = this._type.shape.dimensions; + context.data = this._dataType; + return context; + } + + _decode(context, dimension) { + let shape = context.dimensions; + if (shape.length == 0) { + shape = [ 1 ]; + } + let results = []; + let size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._data[context.index]); + context.index++; + context.count++; + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.dimensions.length == 0) { + return results[0]; + } + return results; + } +}; + +mnn.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType || '?'; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +mnn.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (this._dimensions && this._dimensions.length > 0) { + return '[' + this._dimensions.map((dimension) => dimension ? 
dimension.toString() : '?').join(',') + ']'; + } + return ''; + } +}; + +mnn.Metadata = class { + + static open(host) { + if (mnn.Metadata._metadata) { + return Promise.resolve(mnn.Metadata._metadata); + } + return host.request(null, 'mnn-metadata.json', 'utf-8').then((data) => { + mnn.Metadata._metadata = new mnn.Metadata(data); + return mnn.Metadata._metadata; + }).catch(() => { + mnn.Metadata._metadata = new mnn.Metadata(null); + return mnn.Metadata._metadata; + }); + } + + constructor(data) { + this._map = new Map(); + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map.set(item.name, item.schema); + } + } + } + } + } + + type(name) { + return this._map.has(name) ? this._map.get(name) : null; + } + + attribute(type, name) { + const schema = this.type(type); + if (schema) { + let attributeMap = schema.attributeMap; + if (!attributeMap) { + attributeMap = {}; + if (schema.attributes) { + for (const attribute of schema.attributes) { + attributeMap[attribute.name] = attribute; + } + } + schema.attributeMap = attributeMap; + } + const attributeSchema = attributeMap[name]; + if (attributeSchema) { + return attributeSchema; + } + } + return null; + } +}; + +mnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading MNN model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = mnn.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/mxnet-metadata.json b/frontend/packages/core/public/netron/mxnet-metadata.json new file mode 100644 index 00000000..9651d1b6 --- /dev/null +++ b/frontend/packages/core/public/netron/mxnet-metadata.json @@ -0,0 +1,871 @@ +[ + { + "name": "Convolution", + "schema": { + "attributes": [ + { + "default": false, + "name": "cudnn_off", + "type": "boolean" + }, + { + "default": "off", + "name": 
"cudnn_tune" + }, + { + "default": [ 1, null ], + "name": "dilate", + "type": "int32[]" + }, + { + "name": "kernel", + "type": "int32[]" + }, + { + "visible": false, + "name": "no_bias", + "type": "boolean" + }, + { + "type": "int32", + "default": 1, + "name": "num_group" + }, + { + "type": "int32", + "name": "num_filter" + }, + { + "default": [0, null], + "name": "pad", + "type": "int32[]" + }, + { + "default": [1, null], + "name": "stride", + "type": "int32[]" + }, + { + "type": "int32", + "default": "1024", + "name": "workspace" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + }, + { + "name": "bias", + "option": "optional" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "Deconvolution", + "schema": { + "attributes": [ + { + "visible": false, + "name": "no_bias" + }, + { + "default": "1", + "name": "num_group" + }, + { + "type": "int32", + "default": "1024", + "name": "workspace" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "FullyConnected", + "schema": { + "attributes": [ + { + "type": "boolean", + "default": true, + "name": "flatten" + }, + { + "type": "boolean", + "visible": false, + "name": "no_bias" + }, + { + "type": "int32", + "name": "num_hidden" + } + + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "Dropout", + "schema": { + "category": "Dropout", + "attributes": [ + { + "type": "float32", + "default": 0.5, + "name": "p" + } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "LRN", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0.0001 }, + { "name": 
"beta", "type": "float32", "default": 0.75 }, + { "name": "knorm", "type": "float32", "default": 2 }, + { "name": "nsize", "type": "int32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "SoftmaxOutput", + "schema": { + "attributes": [ + { + "default": "1", + "name": "grad_scale" + }, + { + "default": "-1", + "name": "ignore_label" + }, + { + "default": false, + "name": "multi_output" + }, + { + "default": "null", + "name": "normalization" + }, + { + "default": false, + "name": "out_grad" + }, + { + "default": "0", + "name": "smooth_alpha" + }, + { + "default": false, + "name": "use_ignore" + }, + { + "default": false, + "name": "preserve_shape" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + }, + { + "name": "label" + } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "SoftmaxActivation", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "LeakyReLU", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Activation", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Pooling", + "schema": { + "attributes": [ + { + "default": false, + "name": "cudnn_off" + }, + { + "default": false, + "name": "global_pool" + }, + { + "name": "kernel", + "type": "int32[]" + }, + { + "default": [ 0, null ], + "name": "pad", + "type": "int32[]" + }, + { + "default": "valid", + "name": "pooling_convention" + }, + { + "default": "max", + "name": "pool_type" + }, + { + "default": [ 1, null ], + "name": "stride", + "type": "int32[]" + } + ], + "category": "Pool", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": 
"Flatten", + "schema": { + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Concat", + "schema": { + "attributes": [ + { + "default": "1", + "name": "dim" + }, + { + "visible": false, + "name": "num_args" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "SliceChannel", + "schema": { + "inputs": [ + { + "name": "inputs" + } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + } + }, + { + "name": "_Plus", + "schema": { + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "elemwise_add", + "schema": { + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + } + }, + { + "name": "elemwise_sub", + "schema": { + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + } + }, + { + "name": "elemwise_div", + "schema": { + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + } + }, + { + "name": "BatchNorm", + "schema": { + "attributes": [ + { + "type": "int32", + "default": 1, + "name": "axis" + }, + { + "type": "float64", + "default": 0.001, + "name": "eps" + }, + { + "type": "float32", + "default": 0.9, + "name": "momentum" + }, + { + "type": "boolean", + "default": true, + "name": "fix_gamma" + }, + { + "type": "boolean", + "default": false, + "name": "use_global_stats" + } + ], + "category": "Normalization", + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "variance" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "CuDNNBatchNorm", + "schema": { + "category": "Normalization", + "inputs": [ + { + "name": "input" + }, + { + "name": "gamma" + }, + { + "name": 
"beta" + } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ElementWiseSum", + "schema": { + "category": "Normalization", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Embedding", + "schema": { + "category": "Transform", + "attributes": [ + { + "type": "int32", + "name": "input_dim" + }, + { + "type": "int32", + "name": "output_dim" + } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "RNN", + "schema": { + "category": "Layer", + "attributes": [ + { + "type": "boolean", + "name": "bidirectional", + "default": false + }, + { + "name": "lstm_parameters", + "visible": false + }, + { + "type": "int32", + "name": "num_layers" + }, + { + "type": "boolean", + "default": false, + "name": "state_outputs" + }, + { + "type": "int32", + "name": "state_size" + }, + { + "type": "float32", + "name": "p", + "default": 0.0 + } + ], + "inputs": [ + { "name": "input" }, + { "name": "state_0" }, + { "name": "state_1" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Reshape", + "schema": { + "category": "Shape", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_minus_scalar", + "schema": { + "attributes": [ + { "name": "scalar", "type": "float32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_rminus_scalar", + "schema": { + "attributes": [ + { "name": "scalar", "type": "float32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_mul_scalar", + "schema": { + "attributes": [ + { "name": "scalar", "type": "float32" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "broadcast_mul", + "schema": { + "inputs": [ + { "name": 
"lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + } + }, + { + "name": "broadcast_add", + "schema": { + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + } + }, + { + "name": "broadcast_div", + "schema": { + "inputs": [ + { "name": "lhs" }, + { "name": "rhs" } + ], + "outputs": [ + { "name": "out" } + ] + } + }, + { + "name": "_copy", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_minus_scalar", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_mul_scalar", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "slice_axis", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Pad", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "relu", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "softmax", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_linalg_gemm2", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_zeros", + "schema": { + "category": "Constant", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_sub", + "schema": { + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + { "name": "z" } + ] + } + }, + { + "name": "_mul", + "schema": { + "inputs": [ + { "name": "x" }, + { "name": "y" } + ], + "outputs": [ + 
{ "name": "z" } + ] + } + }, + { + "name": "MakeLoss", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "transpose", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "sum", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "square", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "sqrt", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "mean", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "log", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "_plus_scalar", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/mxnet.js b/frontend/packages/core/public/netron/mxnet.js new file mode 100644 index 00000000..95e1eb21 --- /dev/null +++ b/frontend/packages/core/public/netron/mxnet.js @@ -0,0 +1,1314 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var mxnet = mxnet || {}; +var long = long || { Long: require('long') }; +var zip = zip || require('./zip'); +var ndarray = ndarray || {}; + +mxnet.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'model' || extension === 'mar') { + if (context.entries('zip').length > 0) { + return true; + } + } + else if (extension == 'json') { + const json = context.text; + if (json.indexOf('"nodes":', 0) != -1) { + try { + const symbol = JSON.parse(json); + if (symbol && symbol.nodes && 
symbol.arg_nodes && symbol.heads) { + return true; + } + } + catch (err) { + // continue regardless of error + } + } + } + else if (extension == 'params') { + const buffer = context.buffer; + const signature = [ 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ]; + if (buffer && buffer.length > signature.length && signature.every((v, i) => v == buffer[i])) { + return true; + } + } + return false; + } + + open(context, host) { + const identifier = context.identifier; + const extension = context.identifier.split('.').pop().toLowerCase(); + let symbol = null; + let params = null; + let format = null; + let basename = null; + switch (extension) { + case 'json': + try { + symbol = JSON.parse(context.text); + if (symbol && symbol.nodes && symbol.nodes.some((node) => node && node.op == 'tvm_op')) { + format = 'TVM'; + } + basename = mxnet.ModelFactory._basename(identifier, 'json', 'symbol'); + if (basename) { + return context.request(basename + '-0000.params', null).then((params) => { + return this._openModel(identifier, format, null, symbol, null, params, host); + }).catch(() => { + return this._openModel(identifier, format, null, symbol, null, params, host); + }); + } + return this._openModel(identifier, format, null, symbol, null, null, host); + } + catch (error) { + host.exception(error, false); + throw new mxnet.Error(error.message), null; + } + case 'params': + params = context.buffer; + basename = mxnet.ModelFactory._basename(context.identifier, 'params'); + if (basename) { + return context.request(basename + '-symbol.json', 'utf-8').then((text) => { + symbol = JSON.parse(text); + if (symbol && symbol.nodes && symbol.nodes.some((node) => node && node.op == 'tvm_op')) { + format = 'TVM'; + } + return this._openModel(identifier, format, null, symbol, null, params, host); + }).catch(() => { + return this._openModel(identifier, format, null, null, null, params, host); + }); + } + else { + return this._openModel(identifier, format, null, null, null, params, host); + } + 
case 'mar': + case 'model': { + const entries = new Map(); + try { + for (const entry of context.entries('zip')) { + entries.set(entry.name, entry); + } + } + catch (err) { + throw new mxnet.Error('Failed to decompress ZIP archive. ' + err.message); + } + + let manifestEntry = entries.get(entries.has('MANIFEST.json') ? 'MANIFEST.json' : 'MAR-INF/MANIFEST.json'); + let rootFolder = ''; + if (!manifestEntry) { + const folders = Array.from(entries.keys()).filter((name) => name.endsWith('/')).filter((name) => entries.get(name + 'MANIFEST.json')); + if (folders.length != 1) { + throw new mxnet.Error("Manifest not found in '" + context.identifier + "'."); + } + rootFolder = folders[0]; + manifestEntry = entries.get(rootFolder + 'MANIFEST.json'); + } + + const decoder = new TextDecoder('utf-8'); + let manifest = null; + try { + manifest = JSON.parse(decoder.decode(manifestEntry.data)); + } + catch (err) { + throw new mxnet.Error('Failed to read manifest. ' + err.message); + } + + let modelFormat = null; + let symbolEntry = null; + let signatureEntry = null; + let paramsEntry = null; + if (manifest.Model) { + modelFormat = manifest.Model['Model-Format']; + if (modelFormat && modelFormat != 'MXNet-Symbolic') { + throw new mxnet.Error('Model format \'' + modelFormat + '\' not supported.'); + } + format = 'MXNet Model Server'; + if (manifest['Model-Archive-Version']) { + format += ' v' + manifest['Model-Archive-Version'].toString(); + } + if (!manifest.Model.Symbol) { + throw new mxnet.Error('Manifest does not contain symbol entry.'); + } + symbolEntry = entries.get(rootFolder + manifest.Model.Symbol); + if (manifest.Model.Signature) { + signatureEntry = entries.get(rootFolder + manifest.Model.Signature); + } + if (manifest.Model.Parameters) { + paramsEntry = entries.get(rootFolder + manifest.Model.Parameters); + } + } + else if (manifest.model) { + format = 'MXNet Model Archive'; + if (manifest.specificationVersion) { + format += ' v' + 
manifest.specificationVersion.toString(); + } + if (manifest.model.modelName) { + symbolEntry = entries.get(rootFolder + manifest.model.modelName + '-symbol.json'); + let key = null; + for (key of Array.from(entries.keys())) { + key = key.substring(rootFolder.length); + if (key.endsWith('.params') && key.startsWith(manifest.model.modelName)) { + paramsEntry = entries.get(key); + break; + } + } + if (!symbolEntry && !paramsEntry) { + for (key of Object.keys(entries)) { + key = key.substring(rootFolder.length); + if (key.endsWith('.params')) { + paramsEntry = entries.get(key); + break; + } + } + } + } + } + else { + throw new mxnet.Error('Manifest does not contain model.'); + } + + if (!symbolEntry && !paramsEntry) { + throw new mxnet.Error("Model does not contain symbol entry."); + } + + try { + if (symbolEntry) { + symbol = JSON.parse(decoder.decode(symbolEntry.data)); + } + } + catch (err) { + throw new mxnet.Error('Failed to load symbol entry.' + err.message); + } + + if (paramsEntry) { + params = paramsEntry.data; + } + let signature = null; + try { + if (signatureEntry) { + signature = JSON.parse(decoder.decode(signatureEntry.data)); + } + } + catch (err) { + // continue regardless of error + } + + try { + return this._openModel(identifier, format, manifest, symbol, signature, params, host); + } + catch (error) { + const message = error && error.message ? error.message : error.toString(); + throw new mxnet.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + } + default: + throw new mxnet.Error('Unsupported file extension.'); + } + } + + _openModel(identifier, format, manifest, symbol, signature, params, host) { + return mxnet.Metadata.open(host).then((metadata) => { + const parameters = new Map(); + if (params) { + try { + const stream = new ndarray.Stream(params); + for (const key of Object.keys(stream.arrays)) { + const name = (key.startsWith('arg:') || key.startsWith('aux:')) ? 
key.substring(4) : key; + parameters.set(name, stream.arrays[key]); + } + } + catch (error) { + // continue regardless of error + } + } + try { + return new mxnet.Model(metadata, format, manifest, symbol, signature, parameters); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new mxnet.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } + + static _basename(identifier, extension, suffix) { + const dots = identifier.split('.'); + if (dots.length >= 2 && dots.pop().toLowerCase() === extension) { + const dashes = dots.join('.').split('-'); + if (dashes.length >= 2) { + const token = dashes.pop(); + if (suffix) { + if (token != suffix) { + return null; + } + } + else { + for (let i = 0; i < token.length; i++) { + const c = token.charAt(i); + if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')) { + continue; + } + return null; + } + } + return dashes.join('-'); + } + } + return null; + } +}; + +mxnet.Model = class { + + constructor(metadata, format, manifest, symbol, signature, params) { + if (!symbol && !params) { + throw new mxnet.Error('JSON symbol data not available.'); + } + if (symbol) { + if (!Object.prototype.hasOwnProperty.call(symbol, 'nodes')) { + throw new mxnet.Error('JSON file does not contain an MXNet \'nodes\' property.'); + } + if (!Object.prototype.hasOwnProperty.call(symbol, 'arg_nodes')) { + throw new mxnet.Error('JSON file does not contain an MXNet \'arg_nodes\' property.'); + } + if (!Object.prototype.hasOwnProperty.call(symbol, 'heads')) { + throw new mxnet.Error('JSON file does not contain an MXNet \'heads\' property.'); + } + } + + if (manifest) { + if (manifest.Model && manifest.Model['Model-Name']) { + this._name = manifest.Model['Model-Name']; + } + if (manifest.Model && manifest.Model.Description && this._name != manifest.Model.Description) { + this._description = manifest.Model.Description; + } + 
if (manifest.Engine && manifest.Engine.MXNet) { + const engineVersion = mxnet.Model._convert_version(manifest.Engine.MXNet); + this._runtime = 'MXNet v' + (engineVersion ? engineVersion : manifest.Engine.MXNet.toString()); + } + if (manifest.License) { + this._license = manifest.License; + } + if (manifest.model && manifest.model.modelName) { + this._name = manifest.model.modelName; + } + if (manifest.model && manifest.model.modelVersion) { + this._version = manifest.model.modelVersion; + } + if (manifest.model && manifest.model.modelName && this._name != manifest.model.description) { + this._description = manifest.model.description; + } + if (manifest.runtime) { + this._runtime = manifest.runtime; + } + if (manifest.engine && manifest.engine.engineName) { + const engine = manifest.engine.engineVersion ? manifest.engine.engineName + ' ' + manifest.engine.engineVersion : manifest.engine.engineName; + this._runtime = this._runtime ? (this._runtime + ' (' + engine + ')') : engine; + } + if (manifest.publisher && manifest.publisher.author) { + this._author = manifest.publisher.author; + if (manifest.publisher.email) { + this._author = this._author + ' <' + manifest.publisher.email + '>'; + } + } + if (manifest.license) { + this._license = manifest.license; + } + } + + this._format = format; + if (!this._format && symbol && symbol.attrs && symbol.attrs.mxnet_version) { + const version = mxnet.Model._convert_version(symbol.attrs.mxnet_version); + if (version) { + this._format = 'MXNet v' + version; + } + } + if (!this._format) { + this._format = 'MXNet'; + } + + this._graphs = []; + this._graphs.push(new mxnet.Graph(metadata, manifest, symbol, signature, params)); + } + + get format() { + return this._format; + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get description() { + return this._description; + } + + get author() { + return this._author; + } + + get license() { + return this._license; + } + + get runtime() { + 
return this._runtime; + } + + get graphs() { + return this._graphs; + } + + static _convert_version(value) { + if (Array.isArray(value)) { + if (value.length == 2 && value[0] == 'int') { + const major = Math.floor(value[1] / 10000) % 100; + const minor = Math.floor(value[1] / 100) % 100; + const patch = Math.floor(value[1]) % 100; + return [ major.toString(), minor.toString(), patch.toString() ].join('.'); + } + } + return null; + } +}; + +mxnet.Graph = class { + + constructor(metadata, manifest, symbol, signature, params) { + this._metadata = metadata; + this._nodes = []; + this._inputs = []; + this._outputs = []; + + const tensors = new Map(); + if (params) { + for (const pair of params) { + const key = pair[0]; + const value = pair[1]; + tensors.set(key, new mxnet.Tensor('Initializer', key, new mxnet.TensorType(value.dataType, new mxnet.TensorShape(value.shape.dimensions)), value.data)); + } + } + + if (symbol) { + const nodes = symbol.nodes; + const inputs = {}; + if (signature && signature.inputs) { + for (const input of signature.inputs) { + inputs[input.data_name] = input; + } + } + const outputs = {}; + if (signature && signature.outputs) { + for (const output of signature.outputs) { + outputs[output.data_name] = output; + } + } + + for (const node of nodes) { + node.outputs = []; + } + for (const node of nodes) { + node.inputs = node.inputs.map((input) => { + return mxnet.Graph._updateOutput(nodes, input); + }); + } + + const outputCountMap = {}; + for (const node of nodes) { + for (const output of node.outputs) { + outputCountMap[output] = (outputCountMap[output] || 0) + 1; + } + } + + const argumentMap = {}; + for (const index of symbol.arg_nodes) { + argumentMap[index] = (index < nodes.length) ? nodes[index] : null; + } + + for (let i = 0; i < symbol.heads.length; i++) { + const head = symbol.heads[i]; + const outputId = mxnet.Graph._updateOutput(nodes, head); + const outputName = nodes[outputId[0]] ? nodes[outputId[0]].name : ('output' + ((i == 0) ? 
// Builds the displayable graph from either a symbol JSON graph (with an
// optional signature for typed inputs/outputs and a params map for
// initializer tensors) or, when no symbol is available, from the params
// keys alone by grouping them into synthetic 'Weights' nodes.
mxnet.Graph = class {

    constructor(metadata, manifest, symbol, signature, params) {
        this._metadata = metadata;
        this._nodes = [];
        this._inputs = [];
        this._outputs = [];

        // Wrap every raw parameter array in an mxnet.Tensor, keyed by name,
        // so nodes can attach them as initializers.
        const tensors = new Map();
        if (params) {
            for (const pair of params) {
                const key = pair[0];
                const value = pair[1];
                tensors.set(key, new mxnet.Tensor('Initializer', key, new mxnet.TensorType(value.dataType, new mxnet.TensorShape(value.shape.dimensions)), value.data));
            }
        }

        if (symbol) {
            const nodes = symbol.nodes;
            // Index the signature entries by data_name for O(1) lookup.
            const inputs = {};
            if (signature && signature.inputs) {
                for (const input of signature.inputs) {
                    inputs[input.data_name] = input;
                }
            }
            const outputs = {};
            if (signature && signature.outputs) {
                for (const output of signature.outputs) {
                    outputs[output.data_name] = output;
                }
            }

            // First pass: give every node an outputs list; second pass:
            // normalize every input reference to [ nodeIndex, outputIndex ]
            // while registering the referenced outputs on their producers.
            for (const node of nodes) {
                node.outputs = [];
            }
            for (const node of nodes) {
                node.inputs = node.inputs.map((input) => {
                    return mxnet.Graph._updateOutput(nodes, input);
                });
            }

            // NOTE(review): outputCountMap is computed but never read in this
            // constructor — possibly left over from an earlier revision.
            const outputCountMap = {};
            for (const node of nodes) {
                for (const output of node.outputs) {
                    outputCountMap[output] = (outputCountMap[output] || 0) + 1;
                }
            }

            // arg_nodes are parameter/placeholder nodes; they become graph
            // inputs or node initializers rather than standalone nodes.
            const argumentMap = {};
            for (const index of symbol.arg_nodes) {
                argumentMap[index] = (index < nodes.length) ? nodes[index] : null;
            }

            // Graph outputs come from symbol.heads; the first unnamed head is
            // 'output', subsequent ones 'output2', 'output3', ...
            for (let i = 0; i < symbol.heads.length; i++) {
                const head = symbol.heads[i];
                const outputId = mxnet.Graph._updateOutput(nodes, head);
                const outputName = nodes[outputId[0]] ? nodes[outputId[0]].name : ('output' + ((i == 0) ? '' : (i + 1).toString()));
                let outputType = null;
                const outputSignature = outputs[outputName];
                if (outputSignature && outputSignature.data_shape) {
                    outputType = new mxnet.TensorType(-1, new mxnet.TensorShape(outputSignature.data_shape));
                }
                this._outputs.push(new mxnet.Parameter(outputName, [ new mxnet.Argument('[' + outputId.join(',') + ']', outputType, null) ]));
            }

            // Real operator nodes (everything that is not an arg_node). Note:
            // mxnet.Node may delete entries from argumentMap as it claims
            // arguments as initializers, so the loop below sees the residue.
            const initializerMap = {};
            for (const node of nodes.filter((node, index) => !argumentMap[index])) {
                this._nodes.push(new mxnet.Node(this._metadata, node, argumentMap, initializerMap, tensors));
            }

            // Remaining unclaimed arguments with a single output become graph
            // inputs, typed from the signature when available.
            for (const argumentKey of Object.keys(argumentMap)) {
                const argument = argumentMap[argumentKey];
                if (argument && (!argument.inputs || argument.inputs.length == 0) && (argument.outputs && argument.outputs.length == 1)) {
                    const inputId = argument.outputs[0];
                    const inputName = argument.name;
                    let inputType = null;
                    const inputSignature = inputs[inputName];
                    if (inputSignature && inputSignature.data_shape) {
                        inputType = new mxnet.TensorType(-1, new mxnet.TensorShape(inputSignature.data_shape));
                    }
                    this._inputs.push(new mxnet.Parameter(inputName, [ new mxnet.Argument('[' + inputId.join(',') + ']', inputType) ]));
                }
            }
        }
        else if (params) {
            // No symbol graph: reconstruct a weights-only view by grouping
            // parameter keys on a common separator ('_' or '.').
            let block = null;
            const blocks = [];
            // NOTE(review): params is a Map here (see _openModel), so
            // Object.keys(params) is always [] — every() is vacuously true and
            // the for-of below iterates nothing; verify against the caller.
            let separator = Object.keys(params).every((k) => k.indexOf('_') != -1) ? '_' : '';
            if (separator.length == 0) {
                separator = Object.keys(params).every((k) => k.indexOf('.') != -1) ? '.' : '';
            }
            if (separator.length > 0) {
                const blockMap = {};
                for (const id of Object.keys(params)) {
                    const parts = id.split(separator);
                    let argumentName = parts.pop();
                    // Keep the statistic kind attached to its layer name, e.g.
                    // 'bn_moving_mean' -> argument 'moving_mean'.
                    if (id.endsWith('moving_mean') || id.endsWith('moving_var')) {
                        argumentName = [ parts.pop(), argumentName ].join(separator);
                    }
                    const nodeName = parts.join(separator);
                    block = blockMap[nodeName];
                    if (!block) {
                        block = { name: nodeName, op: 'Weights', params: [] };
                        blockMap[nodeName] = block;
                        blocks.push(block);
                    }
                    blockMap[nodeName].params.push({ name: argumentName, id: id });
                }
            }
            else {
                throw new mxnet.Error("Unsupported key format in params.");
            }

            for (block of blocks) {
                this._nodes.push(new mxnet.Node(metadata, block, {}, {}, params));
            }
        }
    }

    get name() {
        return '';
    }

    get inputs() {
        return this._inputs;
    }

    get outputs() {
        return this._outputs;
    }

    get nodes() {
        return this._nodes;
    }

    // Normalizes a [ nodeIndex, outputIndex, ... ] reference and pads the
    // producer's outputs list so the referenced output slot exists.
    // Returns the two-element [ nodeIndex, outputIndex ] pair.
    static _updateOutput(nodes, input) {
        const nodeIndex = input[0];
        const node = nodes[nodeIndex];
        const outputIndex = input[1];
        if (node) {
            while (outputIndex >= node.outputs.length) {
                node.outputs.push([ nodeIndex, node.outputs.length ]);
            }
        }
        return [ nodeIndex, outputIndex ];
    }
};
: ''; + } + if (separator.length > 0) { + const blockMap = {}; + for (const id of Object.keys(params)) { + const parts = id.split(separator); + let argumentName = parts.pop(); + if (id.endsWith('moving_mean') || id.endsWith('moving_var')) { + argumentName = [ parts.pop(), argumentName ].join(separator); + } + const nodeName = parts.join(separator); + block = blockMap[nodeName]; + if (!block) { + block = { name: nodeName, op: 'Weights', params: [] }; + blockMap[nodeName] = block; + blocks.push(block); + } + blockMap[nodeName].params.push({ name: argumentName, id: id }); + } + } + else { + throw new mxnet.Error("Unsupported key format in params."); + } + + for (block of blocks) { + this._nodes.push(new mxnet.Node(metadata, block, {}, {}, params)); + } + } + } + + get name() { + return ''; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + static _updateOutput(nodes, input) { + const nodeIndex = input[0]; + const node = nodes[nodeIndex]; + const outputIndex = input[1]; + if (node) { + while (outputIndex >= node.outputs.length) { + node.outputs.push([ nodeIndex, node.outputs.length ]); + } + } + return [ nodeIndex, outputIndex ]; + } +}; + +mxnet.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +mxnet.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new mxnet.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + if (this._initializer) { + return this._initializer.name; + } + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } 
// One operator node. Resolves attribute values against the operator schema,
// binds initializer tensors to the node's weight inputs and names the
// inputs/outputs from the schema's declared slots. Claimed arguments are
// deleted from the shared argumentMap so mxnet.Graph does not also surface
// them as graph inputs.
mxnet.Node = class {

    constructor(metadata, node, argumentMap, initializerMap, tensors) {
        this._metadata = metadata;
        this._type = node.op;
        this._name = node.name;
        this._attributes = [];
        this._inputs = [];
        this._outputs = [];

        // The attribute bag has been renamed across MXNet versions:
        // 'attrs' (new), 'attr' (old), 'param' (oldest).
        const attrs = node.attrs || node.attr || node.param;
        if (attrs) {
            // TVM-compiled ops carry the real operator name in func_name.
            if (this._type == 'tvm_op' && attrs.func_name) {
                this._type = attrs.func_name;
            }
            for (const attributeName of Object.keys(attrs)) {
                if (this._type != 'tvm_op' && attributeName != 'func_name') {
                    this._attributes.push(new mxnet.Attribute(this._metadata, this.type, attributeName, attrs[attributeName]));
                }
            }
        }

        let initializer = null;
        const schema = metadata.type(this.type);
        if (node.inputs) {
            let inputs = node.inputs;
            // RNN nodes reference a packed '<name>_parameters' argument whose
            // __init__ payload is presented as an attribute, not an input.
            if (this._type == 'RNN') {
                inputs = inputs.map((input) => {
                    const argumentNodeIndex = input[0];
                    const argument = argumentMap[argumentNodeIndex];
                    if (argument && argument.op == 'null' && argument.name &&
                        argument.name.endsWith('_parameters') && argument.attr && argument.attr.__init__) {
                        this._attributes.push(new mxnet.Attribute(this._metadata, this.type, argument.name, argument.attr.__init__));
                        delete argumentMap[argumentNodeIndex];
                        return null;
                    }
                    return input;
                });
                inputs = inputs.filter((item) => item != null);
            }

            // Bind initializers: prefer a tensor loaded from the params file;
            // otherwise synthesize a placeholder tensor for arguments named
            // like '<nodeName>_*' / '<nodeName>.*', typed from __dtype__ and
            // __shape__ annotations when present.
            const initializers = {};
            for (const input of inputs) {
                const id = '[' + input.join(',') + ']';
                initializer = initializerMap[id];
                if (!initializer) {
                    const argumentNodeIndex = input[0];
                    const argument = argumentMap[argumentNodeIndex];
                    if (argument && argument.name &&
                        (!argument.inputs || argument.inputs.length == 0) &&
                        (argument.outputs && argument.outputs.length == 1)) {
                        initializer = tensors.get(argument.name) || null;
                        if (initializer) {
                            delete argumentMap[argumentNodeIndex];
                        }
                        else {
                            // Gluon appends '_fwd' to node names.
                            // NOTE(review): '_fwd' is four characters but only
                            // three are sliced off, leaving a trailing '_' before
                            // the startsWith(prefix + '_') check below — confirm
                            // this double underscore is intended.
                            let prefix = this._name;
                            if (prefix.endsWith('_fwd')) {
                                prefix = prefix.slice(0, -3);
                            }
                            if (argument.name && (argument.name.startsWith(prefix + '_') || argument.name.startsWith(prefix + '.'))) {
                                let dataType = -1;
                                let shape = [];
                                if (argument.attrs && argument.attrs.__dtype__ && argument.attrs.__shape__) {
                                    try {
                                        dataType = parseInt(argument.attrs.__dtype__);
                                        // '(1, 2, 3)' -> [1,2,3]; empty dimensions become '?'.
                                        shape = JSON.parse('[' + argument.attrs.__shape__.replace('(', '').replace(')', '').split(' ').join('').split(',').map((dimension => dimension || '"?"' )).join(',') + ']');
                                    }
                                    catch (err) {
                                        // continue regardless of error
                                    }
                                }
                                let argumentType = null;
                                if (dataType !== -1 || shape.length > 0) {
                                    argumentType = new mxnet.TensorType(dataType, new mxnet.TensorShape(shape));
                                }
                                else {
                                    argumentType = new mxnet.TensorType(-1, new mxnet.TensorShape(null));
                                }
                                initializer = new mxnet.Tensor('Initializer', argument.name, argumentType, null);
                                delete argumentMap[argumentNodeIndex];
                            }
                        }
                    }
                }
                if (initializer) {
                    initializers[id] = initializer;
                    initializerMap[id] = initializer;
                }
            }

            // Name the inputs from the schema's declared slots; a 'variadic'
            // slot absorbs all remaining inputs. Extra undeclared inputs get
            // positional names.
            let inputIndex = 0;
            if (schema && schema.inputs) {
                for (const inputDef of schema.inputs) {
                    if (inputIndex < inputs.length || inputDef.option != 'optional') {
                        const inputCount = (inputDef.option == 'variadic') ? (inputs.length - inputIndex) : 1;
                        let inputArguments = [];
                        for (const input of inputs.slice(inputIndex, inputIndex + inputCount)) {
                            const inputId = '[' + input.join(',') + ']';
                            if (inputId != '' || inputDef.option != 'optional') {
                                inputArguments.push(new mxnet.Argument(inputId, inputDef.type, initializers[inputId]));
                            }
                        }
                        this._inputs.push(new mxnet.Parameter(inputDef.name, inputArguments));
                        inputIndex += inputCount;
                    }
                }
            }
            if (inputIndex < inputs.length) {
                this._inputs = this._inputs.concat(inputs.slice(inputIndex).map((input, index) => {
                    const inputId = '[' + input.join(',') + ']';
                    return new mxnet.Parameter((inputIndex + index).toString(), [
                        new mxnet.Argument(inputId, null, initializers[inputId])
                    ]);
                }));
            }
        }

        // Same slot-naming scheme for outputs.
        if (node.outputs) {
            const outputs = node.outputs;
            let outputIndex = 0;
            if (schema && schema.outputs) {
                for (const outputDef of schema.outputs) {
                    if (outputIndex < outputs.length || outputDef.option != 'optional') {
                        const outputArguments = [];
                        const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1;
                        for (const output of outputs.slice(outputIndex, outputIndex + outputCount)) {
                            outputArguments.push(new mxnet.Argument('[' + output.join(',') + ']', null, null));
                        }
                        this._outputs.push(new mxnet.Parameter(outputDef.name, outputArguments));
                        outputIndex += outputCount;
                    }
                }
            }
            if (outputIndex < outputs.length) {
                this._outputs = this._outputs.concat(outputs.slice(outputIndex).map((output, index) => {
                    return new mxnet.Parameter((outputIndex + index).toString(), [
                        new mxnet.Argument('[' + output.join(',') + ']', null, null)
                    ]);
                }));
            }
        }

        // Synthetic 'Weights' nodes (params-only models) carry their tensors
        // directly in node.params.
        if (node.params) {
            for (const param of node.params) {
                this._inputs.push(new mxnet.Parameter(param.name, [
                    new mxnet.Argument(param.id, null, tensors.get(param.id) || null)
                ]));
            }
        }
    }

    get type() {
        return this._type;
    }

    get metadata() {
        return this._metadata.type(this._type);
    }

    get name() {
        return this._name;
    }

    get inputs() {
        return this._inputs;
    }

    get outputs() {
        return this._outputs;
    }

    get attributes() {
        return this._attributes;
    }
};
var mxnet = mxnet || {};

mxnet.Attribute = class {

    // Attribute of an MXNet node: keeps a raw name/value pair, coerces the
    // value using the operator schema when one is available, and decides
    // whether the attribute should be shown (hidden when it equals the
    // schema default or the schema marks it invisible).
    constructor(metadata, type, name, value) {
        this._name = name;
        this._value = value;

        let parsed;
        const schema = metadata.attribute(type, name);
        if (schema && schema.type) {
            switch (schema.type) {
                case 'boolean':
                    if (value === 'True') {
                        this._value = true;
                    }
                    else if (value === 'False') {
                        this._value = false;
                    }
                    break;
                case 'int32':
                    parsed = Number.parseInt(this._value, 10);
                    // `raw - parsed` is NaN when the string had trailing junk;
                    // keep the original string in that case.
                    this._value = Number.isNaN(this._value - parsed) ? value : parsed;
                    break;
                case 'float32':
                case 'float64':
                    parsed = Number.parseFloat(this._value);
                    this._value = Number.isNaN(this._value - parsed) ? value : parsed;
                    break;
                case 'int32[]': {
                    const text = this._value;
                    if (text.length > 2 && text.startsWith('(') && text.endsWith(')')) {
                        let numbers = [];
                        const tokens = text.substring(1, text.length - 1).split(',')
                            .map((token) => token.trim())
                            .map((token) => token.endsWith('L') ? token.substring(0, token.length - 1) : token);
                        for (const token of tokens) {
                            parsed = Number.parseInt(token, 10);
                            if (Number.isNaN(token - parsed)) {
                                // One bad entry poisons the whole list.
                                numbers = null;
                            }
                            else if (numbers != null) {
                                numbers.push(parsed);
                            }
                        }
                        if (numbers != null) {
                            this._value = numbers;
                        }
                    }
                    break;
                }
            }
        }

        if (schema) {
            if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) {
                this._visible = false;
            }
            else if (Object.prototype.hasOwnProperty.call(schema, 'default')) {
                let expected = schema.default;
                if (this._value == expected) {
                    this._visible = false;
                }
                else if (Array.isArray(this._value) && Array.isArray(expected)) {
                    expected = expected.slice(0, expected.length);
                    // A trailing null in the schema default means "repeat the
                    // previous entry" to match the actual value's length.
                    if (expected.length > 1 && expected[expected.length - 1] == null) {
                        expected.pop();
                        while (expected.length < this._value.length) {
                            expected.push(expected[expected.length - 1]);
                        }
                    }
                    if (this._value.every((item, index) => item == expected[index])) {
                        this._visible = false;
                    }
                }
            }
        }
    }

    get name() {
        return this._name;
    }

    get type() {
        // NOTE(review): `_type` is never assigned in the constructor, so this
        // is always undefined — kept as-is to preserve behavior.
        return this._type;
    }

    get value() {
        return this._value;
    }

    get visible() {
        return this._visible == false ? false : true;
    }
};
var mxnet = mxnet || {};

mxnet.Tensor = class {

    // Weight/initializer tensor read from an MXNet params/ndarray file.
    // `type` is a mxnet.TensorType (or null); `data` is a Uint8Array (or null).
    constructor(kind, name, type, data) {
        this._kind = kind;
        this._name = name;
        this._type = type;
        this._data = data;
    }

    get kind() {
        // NOTE(review): the constructor stores `kind` but this getter has
        // always returned the literal 'Initializer'; kept as-is so display
        // labels do not change.
        return 'Initializer';
    }

    get name() {
        return this._name;
    }

    get type() {
        return this._type;
    }

    // Non-null string explaining why the tensor cannot be decoded, else null.
    get state() {
        return this._context().state;
    }

    // Fully decoded (nested array) value, or null when not decodable.
    get value() {
        const context = this._context();
        if (context.state) {
            return null;
        }
        context.limit = Number.MAX_SAFE_INTEGER;
        return this._decode(context, 0);
    }

    // JSON preview truncated to 10000 elements ('' when not decodable).
    toString() {
        const context = this._context();
        if (context.state) {
            return '';
        }
        context.limit = 10000;
        const value = this._decode(context, 0);
        return JSON.stringify(value, null, 4);
    }

    // Builds the decode cursor: data type, dimensions, DataView, error state.
    _context() {
        const context = {};
        context.state = null;
        context.index = 0;
        context.count = 0;
        if (!this._data) {
            context.state = 'Tensor data is empty.';
            return context;
        }
        // FIX: was `!this._type && this._type.dataType === '?'`, which threw a
        // TypeError when the type was missing and was always false otherwise.
        if (!this._type || this._type.dataType === '?') {
            context.state = 'Tensor has no data type.';
            return context;
        }
        // FIX: was `this._type.shape.length` — shape is a TensorShape object,
        // so `.length` was always undefined and the guard never fired.
        if (this._type.shape.dimensions.length < 1) {
            context.state = 'Tensor has unknown shape.';
            return context;
        }
        context.dataType = this._type.dataType;
        context.dimensions = this._type.shape.dimensions;
        context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength);
        return context;
    }

    // Decodes `dimensions[dimension]` elements, recursing over remaining
    // dimensions; truncates with '...' once context.limit is exceeded.
    _decode(context, dimension) {
        const results = [];
        const size = context.dimensions[dimension];
        if (dimension == context.dimensions.length - 1) {
            for (let i = 0; i < size; i++) {
                if (context.count > context.limit) {
                    results.push('...');
                    return results;
                }
                switch (context.dataType) {
                    case 'float32':
                        results.push(context.data.getFloat32(context.index, true));
                        context.index += 4;
                        context.count++;
                        break;
                    case 'float64':
                        results.push(context.data.getFloat64(context.index, true));
                        context.index += 8;
                        context.count++;
                        break;
                    case 'float16':
                        results.push(mxnet.Tensor._decodeNumberFromFloat16(context.data.getUint16(context.index, true)));
                        context.index += 2;
                        context.count++;
                        break;
                    case 'uint8':
                        results.push(context.data.getUint8(context.index));
                        context.index += 1;
                        context.count++;
                        break;
                    case 'int32':
                        results.push(context.data.getInt32(context.index, true));
                        context.index += 4;
                        context.count++;
                        break;
                    case 'int8':
                        results.push(context.data.getInt8(context.index));
                        context.index += 1;
                        context.count++;
                        break;
                    case 'int64':
                        // `long` is provided by the surrounding module scope.
                        results.push(new long.Long(context.data.getUint32(context.index, true), context.data.getUint32(context.index + 4, true), false));
                        context.index += 8;
                        context.count++;
                        break;
                }
            }
        }
        else {
            for (let j = 0; j < size; j++) {
                if (context.count > context.limit) {
                    results.push('...');
                    return results;
                }
                results.push(this._decode(context, dimension + 1));
            }
        }
        return results;
    }

    // IEEE 754 half-precision (binary16) -> JavaScript number.
    static _decodeNumberFromFloat16(value) {
        const s = (value & 0x8000) >> 15;
        const e = (value & 0x7C00) >> 10;
        const f = value & 0x03FF;
        if (e == 0) {
            // subnormal
            return (s ? -1 : 1) * Math.pow(2, -14) * (f / Math.pow(2, 10));
        }
        else if (e == 0x1F) {
            return f ? NaN : ((s ? -1 : 1) * Infinity);
        }
        return (s ? -1 : 1) * Math.pow(2, e - 15) * (1 + (f / Math.pow(2, 10)));
    }
};

mxnet.TensorType = class {

    // Maps the numeric MXNet dtype id to a name; throws on unknown ids.
    constructor(dataType, shape) {
        switch (dataType) {
            case 0: this._dataType = 'float32'; break;
            case 1: this._dataType = 'float64'; break;
            case 2: this._dataType = 'float16'; break;
            case 3: this._dataType = 'uint8'; break;
            case 4: this._dataType = 'int32'; break;
            case 5: this._dataType = 'int8'; break;
            case 6: this._dataType = 'int64'; break;
            case -1: this._dataType = '?'; break;
            default: throw new mxnet.Error("Unknown type '" + dataType + "'.");
        }
        this._shape = shape;
    }

    get dataType() {
        return this._dataType;
    }

    get shape() {
        return this._shape;
    }

    toString() {
        return this._dataType + this._shape.toString();
    }
};

mxnet.TensorShape = class {

    constructor(dimensions) {
        this._dimensions = dimensions;
    }

    get dimensions() {
        return this._dimensions;
    }

    // '[d0,d1,...]', '' for a null or empty dimension list.
    toString() {
        if (this._dimensions) {
            if (this._dimensions.length == 0) {
                return '';
            }
            return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']';
        }
        return '';
    }
};

mxnet.Metadata = class {

    // Loads and caches mxnet-metadata.json via the host; falls back to an
    // empty metadata table when the request fails.
    static open(host) {
        if (mxnet.Metadata._metadata) {
            return Promise.resolve(mxnet.Metadata._metadata);
        }
        return host.request(null, 'mxnet-metadata.json', 'utf-8').then((data) => {
            mxnet.Metadata._metadata = new mxnet.Metadata(data);
            return mxnet.Metadata._metadata;
        }).catch(() => {
            mxnet.Metadata._metadata = new mxnet.Metadata(null);
            return mxnet.Metadata._metadata;
        });
    }

    constructor(data) {
        this._map = {};
        this._attributeCache = {};
        if (data) {
            const items = JSON.parse(data);
            if (items) {
                for (const item of items) {
                    if (item.name && item.schema) {
                        item.schema.name = item.name;
                        this._map[item.name] = item.schema;
                    }
                }
            }
        }
    }

    // Operator schema by name, or null.
    type(name) {
        return this._map[name] || null;
    }

    // Attribute schema by (operator type, attribute name); per-type lookup
    // tables are built lazily and cached.
    attribute(type, name) {
        let map = this._attributeCache[type];
        if (!map) {
            map = {};
            const schema = this.type(type);
            if (schema && schema.attributes) {
                for (const attribute of schema.attributes) {
                    map[attribute.name] = attribute;
                }
            }
            this._attributeCache[type] = map;
        }
        return map[name] || null;
    }
};
var mxnet = mxnet || {};
var ndarray = ndarray || {};

mxnet.Error = class extends Error {

    // Error type used for all MXNet model-loading failures.
    constructor(message) {
        super(message);
        this.name = 'Error loading MXNet model.';
    }
};

ndarray.Stream = class {

    // Parses an MXNet NDArray save file: signed header, reserved block,
    // a list of arrays and a matching list of names, exposed as a
    // name -> array map.
    constructor(buffer) {
        this._arrays = {};

        const reader = new ndarray.Reader(buffer);
        if (!reader.checkSignature([ 0x12, 1, 0, 0, 0, 0, 0, 0 ])) {
            throw new ndarray.Error('Invalid signature.');
        }
        if (!reader.checkSignature([ 0, 0, 0, 0, 0, 0, 0, 0 ])) {
            throw new ndarray.Error('Invalid reserved block.');
        }

        const data = [];
        for (let remaining = reader.uint64(); remaining > 0; remaining--) {
            data.push(new ndarray.Array(reader));
        }

        const decoder = new TextDecoder('ascii');
        const names = [];
        for (let remaining = reader.uint64(); remaining > 0; remaining--) {
            names.push(decoder.decode(reader.read(reader.uint64())));
        }

        if (names.length != data.length) {
            throw new ndarray.Error('Label count mismatch.');
        }

        names.forEach((name, index) => {
            this._arrays[name] = data[index];
        });
    }

    get arrays() {
        return this._arrays;
    }
};

ndarray.Array = class {

    // Dispatches on the leading magic bytes to the matching format loader
    // (V2, V1 or the legacy unsigned V0 layout).
    constructor(reader) {
        // Byte widths indexed by dtype id: f32, f64, f16, u8, i32, i8, i64.
        ndarray.Array._dataTypeSizeTable = [ 4, 8, 2, 1, 4, 1, 8 ];

        if (reader.checkSignature([ 0xc9, 0xfa, 0x93, 0xF9 ])) {
            this._loadV2(reader);
        }
        else if (reader.checkSignature([ 0xc8, 0xfa, 0x93, 0xF9 ])) {
            this._loadV1(reader);
        }
        else {
            this._loadV0(reader);
        }
    }

    _loadV2(reader) {
        const stype = reader.uint32();
        let auxCount = 0;
        switch (stype) {
            case 0: auxCount = 0; break; // kDefaultStorage
            case 1: auxCount = 1; break; // kRowSparseStorage
            case 2: auxCount = 2; break; // kCSRStorage
        }
        this.sshape = null;
        if (auxCount > 0) {
            this.sshape = new ndarray.Shape(reader, true);
        }
        this._shape = new ndarray.Shape(reader, true);
        if (this._shape.dimensions.length == 0) {
            return;
        }
        this._context = new ndarray.Context(reader);
        this._dataType = reader.uint32();
        if (auxCount > 0) {
            // Sparse storage payloads are not supported.
            throw new ndarray.Error('Not implemented.');
        }
        this._data = reader.read(this._elementSize() * this._shape.size());
    }

    _loadV1(reader) {
        this._shape = new ndarray.Shape(reader, true);
        if (this._shape.dimensions.length == 0) {
            return;
        }
        this._context = new ndarray.Context(reader);
        this._dataType = reader.uint32();
        this._data = reader.read(this._elementSize() * this._shape.size());
    }

    _loadV0(reader) {
        this._shape = new ndarray.Shape(reader, false);
        this._context = new ndarray.Context(reader);
        this._dataType = reader.uint32();
        this._data = reader.read(this._elementSize() * this._shape.size());
    }

    // Byte width of one element; 0 for unknown dtype ids.
    _elementSize() {
        const table = ndarray.Array._dataTypeSizeTable;
        return (this._dataType < table.length) ? table[this._dataType] : 0;
    }

    get dataType() {
        return this._dataType;
    }

    get shape() {
        return this._shape;
    }

    get data() {
        return this._data;
    }
};

ndarray.Shape = class {

    // Reads a rank followed by `rank` dimensions; dimensions are 64-bit in
    // V1/V2 files and 32-bit in V0 files.
    constructor(reader, uint64) {
        const rank = reader.uint32();
        this._dimensions = [];
        for (let i = 0; i < rank; i++) {
            this._dimensions.push(uint64 ? reader.uint64() : reader.uint32());
        }
    }

    get dimensions() {
        return this._dimensions;
    }

    size() {
        return this._dimensions.reduce((a, b) => a * b);
    }
};
reader.uint64() : reader.uint32()); + } + } + + get dimensions() { + return this._dimensions; + } + + size() { + return this._dimensions.reduce((a, b) => a * b); + } +}; + +ndarray.Context = class { + + constructor(reader) { + this._deviceType = reader.uint32(); + this._deviceId = reader.uint32(); + } +}; + +ndarray.Reader = class { + + constructor(buffer) { + this._buffer = buffer; + this._position = 0; + this._end = buffer.length; + } + + checkSignature(signature) { + if (this._position + signature.length <= this._end) { + for (let i = 0; i < signature.length; i++) { + if (this._buffer[this._position + i] != signature[i]) { + return false; + } + } + } + this._position += signature.length; + return true; + } + + read(size) { + if (this._position + size > this._end) { + throw new ndarray.Error('Data not available.'); + } + const data = this._buffer.subarray(this._position, this._position + size); + this._position += size; + return data; + } + + uint16() { + if (this._position + 2 > this._end) { + throw new ndarray.Error('Data not available.'); + } + const value = this._buffer[this._position] | (this._buffer[this._position + 1] << 8); + this._position += 2; + return value; + } + + uint32() { + return this.uint16() | (this.uint16() << 16); + } + + uint64() { + const value = this.uint32(); + if (this.uint32() != 0) { + throw new ndarray.Error('Large int64 value.'); + } + return value; + } +}; + +ndarray.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'NDArray Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = mxnet.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/ncnn-metadata.json b/frontend/packages/core/public/netron/ncnn-metadata.json new file mode 100644 index 00000000..4432d802 --- /dev/null +++ b/frontend/packages/core/public/netron/ncnn-metadata.json @@ -0,0 +1,665 @@ +[ + { + "name": "AbsVal", + 
"schema": { + "operator": 0 + } + }, + { + "name": "ArgMax", + "schema": { + "operator": 1 + } + }, + { + "name": "BatchNorm", + "schema": { + "operator": 2, + "category": "Normalization", + "attributes": [ + { "name": "channels", "type": "int32", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "Bias", + "schema": { + "operator": 3, + "category": "Layer", + "attributes": [ + { "name": "bias_data_size", "default": 0, "visible": false } + ] + } + }, + { + "name": "BNLL", + "schema": { + "operator": 4 + } + }, + { + "name": "Concat", + "schema": { + "operator": 5, + "category": "Tensor", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Convolution", + "schema": { + "operator": 6, + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "dilation_w", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "pad_w", "type": "int32", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "type": "int32", "default": 0, "visible": false }, + { "name": "group", "type": "int32", "default": 0 }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "dilation_h", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "pad_h", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Crop", + "schema": { + "operator": 7, + "category": "Data", + "attributes": [ + { "name": "woffset", "default": 0 }, + { "name": "hoffset", "default": 0 }, + { "name": "coffset", "default": 
0 }, + { "name": "outw", "default": 0 }, + { "name": "outh", "default": 0 }, + { "name": "outc", "default": 0 } + ] + } + }, + { + "name": "Deconvolution", + "schema": { + "operator": 8, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_w", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "group", "default": 0 }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "default": 0 }, + { "name": "dilation_h", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_h", "default": 0 } + ] + } + }, + { + "name": "Dropout", + "schema": { + "operator": 9, + "category": "Dropout", + "attributes": [ + { "name": "scale", "type": "float32", "default": 1 } + ] + } + }, + { + "name": "Eltwise", + "schema": { + "operator": 10, + "attributes": [ + { "name": "op_type", "default": 0 }, + { "name": "coeffs", "default": [] } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + } + }, + { + "name": "ELU", + "schema": { + "operator": 11 + } + }, + { + "name": "Embed", + "schema": { + "operator": 12, + "category": "Transform", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "input_dim", "default": 0 }, + { "name": "bias_term", "default": 0 }, + { "name": "weight_data_size", "default": 0 } + ] + } + }, + { + "name": "Exp", + "schema": { + "operator": 13 + } + }, + { + "name": "Flatten", + "schema": { + "operator": 14, + "category": "Shape" + } + }, + { + "name": "InnerProduct", + "schema": { + "operator": 15, + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "bias_term", "default": 0, 
"visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "int8_scale_term", "default": 0, "id": "8" }, + { "name": "activation_type", "default": 0, "id": "9" }, + { "name": "activation_params", "default": 0, "id": "10" } + ] + } + }, + { + "name": "Input", + "schema": { + "operator": 16 + } + }, + { + "name": "Exp", + "schema": { + "operator": 17 + } + }, + { + "name": "LRN", + "schema": { + "operator": 18, + "category": "Normalization", + "attributes": [ + { "name": "region_type", "default": 0 }, + { "name": "local_size", "default": 5 }, + { "name": "alpha", "default": 1 }, + { "name": "beta", "default": 0.75 }, + { "name": "bias", "default": 1 } + ] + } + }, + { + "name": "Exp", + "schema": { + "operator": 19 + } + }, + { + "name": "MVN", + "schema": { + "operator": 20 + } + }, + { + "name": "Pooling", + "schema": { + "operator": 21, + "category": "Pool", + "attributes": [ + { "name": "pooling_type", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_left", "default": 0 }, + { "name": "global_pooling", "default": 0 }, + { "name": "pad_mode", "default": 0 }, + { "name": "kernel_h", "default": 0, "id": 11 }, + { "name": "stride_h", "default": 1, "id": 12 }, + { "name": "pad_top", "default": 0 , "id": 13 }, + { "name": "pad_right", "default": 0, "id": 14 }, + { "name": "pad_bottom", "default": 0, "id": 15 } + ] + } + }, + { + "name": "Power", + "schema": { + "operator": 22 + } + }, + { + "name": "PReLU", + "schema": { + "operator": 23, + "category": "Activation", + "attributes": [ + { "name": "num_slope", "type": "int32", "default": 0, "visible": false } + ] + } + }, + { + "name": "Proposal", + "schema": { + "operator": 24 + } + }, + { + "name": "Reduction", + "schema": { + "operator": 25 + } + }, + { + "name": "ReLU", + "schema": { + "operator": 26, + "category": "Activation", + "attributes": [ + { "name": "slope", "type": "float32", "default": 0 } + ] + } + }, + 
{ + "name": "Reshape", + "schema": { + "operator": 27, + "category": "Shape", + "attributes": [ + { "name": "w", "default": -233 }, + { "name": "h", "default": -233 }, + { "name": "c", "default": -233 }, + { "name": "permute", "default": 0 } + ] + } + }, + { + "name": "ROIPooling", + "schema": { + "operator": 28 + } + }, + { + "name": "Scale", + "schema": { + "operator": 29, + "category": "Layer", + "attributes": [ + { "name": "scale_data_size", "default": 0, "visible": false }, + { "name": "bias_term", "default": 0, "visible": false } + ] + } + }, + { + "name": "Sigmoid", + "schema": { + "operator": 30, + "category": "Activation" + } + }, + { + "name": "Slice", + "schema": { + "operator": 31, + "category": "Tensor", + "attributes": [ + { "name": "slices", "default": [] }, + { "name": "axis", "default": 0 } + ] + } + }, + { + "name": "Softmax", + "schema": { + "operator": 32, + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "fixbug0", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Split", + "schema": { + "operator": 33, + "category": "Tensor", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output", "option": "variadic" } + ] + } + }, + { + "name": "SPP", + "schema": { + "operator": 34, + "category": "Activation" + } + }, + { + "name": "TanH", + "schema": { + "operator": 35, + "category": "Activation" + } + }, + { + "name": "Threshold", + "schema": { + "operator": 36 + } + }, + { + "name": "Tile", + "schema": { + "operator": 37 + } + }, + { + "name": "RNN", + "schema": { + "operator": 38, + "category": "Layer" + } + }, + { + "name": "LSTM", + "schema": { + "operator": 39, + "category": "Layer" + } + }, + { + "name": "BinaryOp", + "schema": { + "operator": 40, + "attributes": [ + { "name": "op_type", "type": "int32", "default": 0 }, + { "name": "with_scalar", "type": "int32", "default": 0 }, + { "name": "b", "type": "float32", "default": 0 } + ] + } + }, + { + "name": 
"UnaryOp", + "schema": { + "operator": 41 + } + }, + { + "name": "ConvolutionDepthWise", + "schema": { + "operator": 42, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_w", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "group", "default": 0 }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "default": 0 }, + { "name": "dilation_h", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_h", "default": 0 } + ] + } + }, + { + "name": "Padding", + "schema": { + "operator": 43 + } + }, + { + "name": "Squeeze", + "schema": { + "operator": 44 + } + }, + { + "name": "ExpandDims", + "schema": { + "operator": 45 + } + }, + { + "name": "Normalize", + "schema": { + "operator": 46 + } + }, + { + "name": "Permute", + "schema": { + "operator": 47, + "category": "Shape", + "attributes": [ + { "name": "order_type", "default": 0 } + ] + } + }, + { + "name": "PriorBox", + "schema": { + "operator": 48, + "attributes": [ + { "name": "min_sizes", "default": [] }, + { "name": "max_sizes", "default": [] }, + { "name": "aspect_ratios", "default": [] }, + { "name": "varainces0", "type": "float32", "default": 0 }, + { "name": "varainces1", "type": "float32", "default": 0 }, + { "name": "varainces2", "type": "float32", "default": 0 }, + { "name": "varainces3", "type": "float32", "default": 0 }, + { "name": "flip", "default": 1 }, + { "name": "clip", "default": 0 }, + { "name": "image_width", "default": 0 }, + { "name": "image_height", "default": 0 }, + { "name": "step_width", "default": -233 }, + { "name": "step_height", "default": -233 }, + { "name": "offset", "default": 0 } + ] + 
} + }, + { + "name": "DetectionOutput", + "schema": { + "operator": 49, + "attributes": [ + { "name": "num_class", "default": 0 }, + { "name": "nms_threshold", "default": 0.05 }, + { "name": "nms_top_k", "default": 300 }, + { "name": "keep_top_k", "default": 100 }, + { "name": "confidence_threshold", "default": 0.5 }, + { "name": "varainces0", "default": 0.1 }, + { "name": "varainces1", "default": 0.1 }, + { "name": "varainces2", "default": 0.2 }, + { "name": "varainces3", "default": 0.2 } + ] + } + }, + { + "name": "Interp", + "schema": { + "operator": 50 + } + }, + { + "name": "DeconvolutionDepthWise", + "schema": { + "operator": 51, + "category": "Layer", + "attributes": [ + { "name": "num_output", "default": 0 }, + { "name": "kernel_w", "default": 0 }, + { "name": "dilation_w", "default": 1 }, + { "name": "stride_w", "default": 1 }, + { "name": "pad_w", "default": 0 }, + { "name": "bias_term", "default": 0, "visible": false }, + { "name": "weight_data_size", "default": 0, "visible": false }, + { "name": "group", "default": 0 }, + { "name": "int8_scale_term", "default": 0 }, + { "name": "activation_type", "default": 0 }, + { "name": "activation_params", "default": [] }, + { "name": "kernel_h", "default": 0 }, + { "name": "dilation_h", "default": 1 }, + { "name": "stride_h", "default": 1 }, + { "name": "pad_h", "default": 0 } + ] + } + }, + { + "name": "ShuffleChannel", + "schema": { + "operator": 52, + "attributes": [ + { "name": "group", "default": 1 } + ] + } + }, + { + "name": "InstanceNorm", + "schema": { + "operator": 53 + } + }, + { + "name": "Clip", + "schema": { + "operator": 54 + } + }, + { + "name": "Reorg", + "schema": { + "operator": 55 + } + }, + { + "name": "YoloDetectionOutput", + "schema": { + "operator": 56, + "attributes": [ + { "name": "num_class", "type": "int32", "default": 20 }, + { "name": "num_box", "type": "int32", "default": 5 }, + { "name": "confidence_threshold", "type": "float32", "default": 0.01 }, + { "name": "nms_threshold", 
"type": "float32", "default": 0.45 }, + { "name": "biases" } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ] + } + }, + { + "name": "Quantize", + "schema": { + "operator": 57 + } + }, + { + "name": "Dequantize", + "schema": { + "operator": 58 + } + }, + { + "name": "Yolov3DetectionOutput", + "schema": { + "operator": 59, + "attributes": [ + { "name": "num_class", "type": "int32", "default": 20 }, + { "name": "num_box", "type": "int32", "default": 5 }, + { "name": "confidence_threshold", "type": "float32", "default": 0.01 }, + { "name": "nms_threshold", "type": "float32", "default": 0.45 }, + { "name": "biases", "type": "float32[]" }, + { "name": "mask", "type": "float32[]" }, + { "name": "anchors_scale", "type": "float32[]" } + ], + "inputs": [ + { "name": "input", "option": "variadic" } + ] + } + }, + { + "name": "PSROIPooling", + "schema": { + "operator": 60 + } + }, + { + "name": "ROIAlign", + "schema": { + "operator": 61 + } + }, + { + "name": "Packing", + "schema": { + "operator": 62 + } + }, + { + "name": "Requantize", + "schema": { + "operator": 63 + } + }, + { + "name": "Cast", + "schema": { + "operator": 64 + } + }, + { + "name": "HardSigmoid", + "schema": { + "operator": 65, + "category": "Activation" + } + }, + { + "name": "SELU", + "schema": { + "operator": 66, + "category": "Activation" + } + }, + { + "name": "ReLU6", + "schema": { + "category": "Activation" + } + } +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/ncnn.js b/frontend/packages/core/public/netron/ncnn.js new file mode 100644 index 00000000..075bf894 --- /dev/null +++ b/frontend/packages/core/public/netron/ncnn.js @@ -0,0 +1,869 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var ncnn = ncnn || {}; +var base = base || require('./base'); + +// https://github.com/Tencent/ncnn/wiki/param-and-model-file-structure +// https://github.com/Tencent/ncnn/wiki/operation-param-weight-table + 
var ncnn = ncnn || {};

ncnn.ModelFactory = class {

    // Decides whether the given file looks like an ncnn model: text params
    // start with the magic line '7767517', binary params with 0x007685DD,
    // and weight blobs with one of a small set of known tag words.
    match(context) {
        const identifier = context.identifier.toLowerCase();
        if (identifier.endsWith('.param') || identifier.endsWith('.cfg.ncnn')) {
            const head = context.text.substring(0, Math.min(context.text.length, 32));
            if (head.split('\n').shift().trim() === '7767517') {
                return true;
            }
        }
        if (identifier.endsWith('.param.bin')) {
            if (this._signature(context.buffer) == 0x007685DD) {
                return true;
            }
        }
        if (identifier.endsWith('.bin') || identifier.endsWith('.weights.ncnn')) {
            // Chromium ships unrelated *.bin files with these exact names.
            if (identifier == 'snapshot_blob.bin' || identifier === 'v8_context_snapshot.bin') {
                return false;
            }
            const signature = this._signature(context.buffer);
            if (signature === 0x00000000 || signature === 0x00000001 ||
                signature === 0x01306B47 || signature === 0x000D4B38 || signature === 0x0002C056) {
                return true;
            }
        }
        return false;
    }

    // Little-endian uint32 at offset 0, or NaN when the buffer is too short
    // (NaN never equals any of the signature constants).
    _signature(buffer) {
        return buffer.length > 4 ? (buffer[0] | buffer[1] << 8 | buffer[2] << 16 | buffer[3] << 24) : NaN;
    }

    // Loads the model, fetching the companion file (.bin for a .param and
    // vice versa); a missing weight file degrades to a weightless graph.
    open(context, host) {
        return ncnn.Metadata.open(host).then((metadata) => {
            const identifier = context.identifier.toLowerCase();
            const createModel = (param, bin) => {
                try {
                    return new ncnn.Model(metadata, param, bin);
                }
                catch (error) {
                    const message = error && error.message ? error.message : error.toString();
                    throw new ncnn.Error(message.replace(/\.$/, '') + " in '" + identifier + "'.");
                }
            };
            if (identifier.endsWith('.param') || identifier.endsWith('.cfg.ncnn')) {
                const bin = identifier.endsWith('.param') ?
                    context.identifier.substring(0, context.identifier.length - 6) + '.bin' :
                    context.identifier.substring(0, context.identifier.length - 9) + '.weights.ncnn';
                return context.request(bin, null)
                    .then((buffer) => createModel(context.text, buffer))
                    .catch(() => createModel(context.text, null));
            }
            if (identifier.endsWith('.param.bin')) {
                const bin = context.identifier.substring(0, context.identifier.length - 10) + '.bin';
                return context.request(bin, null)
                    .then((buffer) => createModel(context.buffer, buffer))
                    .catch(() => createModel(context.buffer, null));
            }
            if (identifier.endsWith('.bin') || identifier.endsWith('.weights.ncnn')) {
                const text = identifier.endsWith('bin') ?
                    context.identifier.substring(0, context.identifier.length - 4) + '.param' :
                    context.identifier.substring(0, context.identifier.length - 13) + '.cfg.ncnn';
                // The text param is mandatory here: without it the weights
                // cannot be interpreted, so a failed request is fatal.
                return context.request(text, 'utf-8')
                    .then((content) => createModel(content, context.buffer))
                    .catch((error) => {
                        const message = error && error.message ? error.message : error.toString();
                        throw new ncnn.Error(message.replace(/\.$/, '') + " in '" + identifier + "'.");
                    });
            }
        });
    }
};
var ncnn = ncnn || {};

ncnn.Model = class {

    // An ncnn model always contains exactly one graph.
    constructor(metadata, param, bin) {
        this._graphs = [ new ncnn.Graph(metadata, param, bin) ];
    }

    get format() {
        return 'ncnn';
    }

    get graphs() {
        return this._graphs;
    }
};

ncnn.Graph = class {

    // Builds the graph from either a text `.param` (string) or a binary
    // `.param.bin` (buffer); `bin` holds the weight blobs and may be null.
    constructor(metadata, param, bin) {
        this._inputs = [];
        this._outputs = [];
        this._nodes = [];

        const blobReader = new ncnn.BlobReader(bin);
        const layers = (typeof param == 'string') ?
            this._param(metadata, param, bin) :
            this._param_bin(metadata, param, bin);

        for (const layer of layers) {
            if (layer.type == 'Input') {
                // Input layer attributes carry the tensor dimensions.
                const dimensions = layer.attributes.map((attribute) => {
                    const dimension = parseInt(attribute.value, 10);
                    return isNaN(dimension) ? attribute.value : dimension;
                });
                const type = new ncnn.TensorType('float32', new ncnn.TensorShape(dimensions));
                const args = layer.outputs.map((output) => new ncnn.Argument(output, type, null));
                this._inputs.push(new ncnn.Parameter(layer.name, true, args));
            }
            else {
                this._nodes.push(new ncnn.Node(metadata, blobReader, layer));
            }
        }
    }

    // Parses the text `.param` format:
    //   magic line, "layerCount blobCount", then one line per layer:
    //   type name inputCount outputCount inputs... outputs... key=value...
    _param(metadata, param) {
        const lines = param.split(/\r?\n/);
        if (lines.shift() !== '7767517') {
            throw new ncnn.Error('Invalid signature.');
        }
        if (lines.shift().split(' ').length !== 2) {
            throw new ncnn.Error('Invalid header count.');
        }
        const layers = [];
        while (lines.length > 0) {
            const line = lines.shift().trim();
            if (line.length === 0) {
                continue;
            }
            const columns = line.split(' ').filter((column) => column.length != 0);
            const layer = {};
            layer.type = columns.shift();
            layer.name = columns.shift();
            const inputCount = parseInt(columns.shift(), 10);
            const outputCount = parseInt(columns.shift(), 10);
            layer.inputs = columns.splice(0, inputCount);
            layer.outputs = columns.splice(0, outputCount);
            layer.attr = {};
            layer.attributes = [];
            for (const column of columns) {
                const parts = column.split('=');
                if (parts.length === 2) {
                    let key = parts[0].trim();
                    let value = parts[1].trim();
                    const keyInt = parseInt(key, 10);
                    if (keyInt < 0) {
                        // Negative ids encode arrays: "-233xx=count,v0,v1,...".
                        value = value.split(',').map((v) => v.trim());
                        value.shift();
                        key = (-(keyInt + 23300)).toString();
                    }
                    layer.attr[key] = value;
                    layer.attributes.push({ key: key, value: value });
                }
            }
            layers.push(layer);
        }
        return layers;
    }

    // Parses the binary `.param.bin` format: a 0x007685DD tag, counts, then
    // per layer the type index, inputs/outputs and an attribute list
    // terminated by id == -233 (ids <= -23300 introduce length-prefixed
    // arrays).
    _param_bin(metadata, param) {
        const reader = new ncnn.BinaryReader(param);
        if (reader.int32() !== 0x007685DD) {
            throw new ncnn.Error('Invalid signature.');
        }
        const layerCount = reader.int32();
        reader.int32(); // blob count, unused
        const layers = [];
        for (let i = 0; i < layerCount; i++) {
            const layer = {};
            const typeIndex = reader.int32();
            // Fall back to the raw index when the operator name is unknown.
            layer.type = metadata.operator(typeIndex) || typeIndex.toString();
            layer.name = i.toString();
            layer.inputs = [];
            layer.outputs = [];
            layer.attr = {};
            layer.attributes = [];
            const inputCount = reader.int32();
            const outputCount = reader.int32();
            for (let j = 0; j < inputCount; j++) {
                layer.inputs.push(reader.int32().toString());
            }
            for (let j = 0; j < outputCount; j++) {
                layer.outputs.push(reader.int32().toString());
            }
            for (let id = reader.int32(); id != -233; id = reader.int32()) {
                if (id <= -23300) {
                    id = -id - 23300;
                    const length = reader.int32();
                    const values = [];
                    for (let k = 0; k < length; k++) {
                        values.push(reader.int32());
                    }
                    layer.attributes.push({ key: id.toString(), value: values.toString() });
                    layer.attr[id.toString()] = values;
                }
                else {
                    const value = reader.int32();
                    layer.attributes.push({ key: id.toString(), value: value.toString() });
                    layer.attr[id.toString()] = value.toString();
                }
            }
            layers.push(layer);
        }
        return layers;
    }

    get inputs() {
        return this._inputs;
    }

    get outputs() {
        return this._outputs;
    }

    get nodes() {
        return this._nodes;
    }
};
outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +ncnn.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +ncnn.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new ncnn.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +ncnn.Node = class { + + constructor(metadata, blobReader, layer) { + this._metadata = metadata; + this._inputs = []; + this._outputs = []; + this._attributes = []; + this._type = layer.type; + this._name = layer.name; + + const operator = metadata.operator(this._type); + if (operator) { + this._type = operator; + } + + const schema = metadata.type(this._type); + + const attributeMetadata = {}; + if (schema && schema.attributes) { + for (let i = 0; i < schema.attributes.length; i++) { + const id = schema.attributes[i].id || i.toString(); + attributeMetadata[id] = schema.attributes[i]; + } + } + for (const attribute of layer.attributes) { + const attributeSchema = attributeMetadata[attribute.key]; + this._attributes.push(new ncnn.Attribute(attributeSchema, attribute.key, attribute.value)); + } + + const inputs = layer.inputs; + let inputIndex = 0; + if (schema && schema.inputs) { + for (const inputDef of schema.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const inputCount = (inputDef.option == 'variadic') ? 
(inputs.length - inputIndex) : 1; + const inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).filter((id) => id != '' || inputDef.option != 'optional').map((id) => { + return new ncnn.Argument(id, null, null); + }); + this._inputs.push(new ncnn.Parameter(inputDef.name, true, inputArguments)); + inputIndex += inputCount; + } + } + } + this._inputs = this._inputs.concat(inputs.slice(inputIndex).map((input, index) => { + const inputName = ((inputIndex + index) == 0) ? 'input' : (inputIndex + index).toString(); + return new ncnn.Parameter(inputName, true, [ + new ncnn.Argument(input, null, null) + ]); + })); + + const outputs = layer.outputs; + let outputIndex = 0; + if (schema && schema.outputs) { + for (const outputDef of schema.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + const outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => { + return new ncnn.Argument(id, null, null); + }); + this._outputs.push(new ncnn.Parameter(outputDef.name, true, outputArguments)); + outputIndex += outputCount; + } + } + } + this._outputs = this._outputs.concat(outputs.slice(outputIndex).map((output, index) => { + const outputName = ((outputIndex + index) == 0) ? 
'output' : (outputIndex + index).toString(); + return new ncnn.Parameter(outputName, true, [ + new ncnn.Argument(output, null, null) + ]); + })); + + let num_output; + let weight_data_size; + let channels; + let scale_data_size; + let bias_data_size; + switch (this._type) { + case 'BatchNorm': { + channels = parseInt(layer.attr['0'] || 0, 10); + this._weight(blobReader, 'slope', [ channels ], 'float32'); + this._weight(blobReader, 'mean', [ channels ], 'float32'); + this._weight(blobReader, 'variance', [ channels ], 'float32'); + this._weight(blobReader, 'bias', [ channels ], 'float32'); + break; + } + case 'InnerProduct': { + num_output = parseInt(layer.attr['0'] || 0, 10); + weight_data_size = parseInt(layer.attr['2'] || 0, 10); + this._weight(blobReader, 'weight', [ num_output, weight_data_size / num_output ]); + if (layer.attr['1'] == '1') { + this._weight(blobReader, 'bias', [ num_output ], 'float32'); + } + break; + } + case 'Bias': { + bias_data_size = parseInt(layer.attr['0'] || 0, 10); + this._weight(blobReader, 'bias', [ bias_data_size ], 'float32'); + break; + } + case 'Embed': { + num_output = parseInt(layer.attr['0'] || 0, 10); + weight_data_size = parseInt(layer.attr['3'] || 0, 10); + this._weight(blobReader, 'weight', [ weight_data_size ]); + if (layer.attr['2'] == '1') { + this._weight(blobReader, 'bias', [ num_output], 'float32'); + } + break; + } + case 'Convolution': + case 'ConvolutionDepthWise': + case 'Deconvolution': + case 'DeconvolutionDepthWise': { + num_output = parseInt(layer.attr['0'] || 0, 10); + const kernel_w = parseInt(layer.attr['1'] || 0, 10); + const kernel_h = parseInt(layer.attr['11'] || kernel_w, 10); + weight_data_size = parseInt(layer.attr['6'] || 0, 10); + this._weight(blobReader, 'weight', [ num_output, weight_data_size / ( num_output * kernel_w * kernel_h), kernel_w, kernel_h ]); + if (layer.attr['5'] == '1') { + this._weight(blobReader, 'bias', [ num_output ], 'float32'); + } + break; + } + case 'Dequantize': { + if 
(layer.attr['1'] == '1') { + bias_data_size = parseInt(layer.attr['2'] || 0, 10); + this._weight(blobReader, 'bias', [ bias_data_size ], 'float32'); + } + break; + } + case 'Requantize': { + if (layer.attr['2'] == '1') { + bias_data_size = parseInt(layer.attr['3'] || 0, 10); + this._weight(blobReader, 'bias', [ bias_data_size ], 'float32'); + } + break; + } + case 'InstanceNorm': { + channels = parseInt(layer.attr['0'] || 0, 10); + this._weight(blobReader, 'gamma', [ channels ], 'float32'); + this._weight(blobReader, 'beta', [ channels ], 'float32'); + break; + } + case 'Scale': { + scale_data_size = parseInt(layer.attr['0'] || 0, 10); + if (scale_data_size != -233) { + this._weight(blobReader, 'scale', [ scale_data_size], 'float32'); + if (layer.attr['1'] == '1') { + this._weight(blobReader, 'bias', [ scale_data_size ], 'float32'); + } + } + break; + } + case 'Normalize': { + scale_data_size = parseInt(layer.attr['3'] || 0, 10); + this._weight(blobReader, 'scale', [ scale_data_size ], 'float32'); + break; + } + case 'PReLU': { + const num_slope = parseInt(layer.attr['0'] || 0, 10); + this._weight(blobReader, 'slope', [ num_slope ], 'float32'); + break; + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + _weight(blobReader, name, dimensions, dataType) { + const blob = blobReader.read(dimensions, dataType); + dataType = blob ? (blob.dataType || '?') : (dataType || '?'); + const data = blob ? 
blob.data : null; + this._inputs.push(new ncnn.Parameter(name, true, [ + new ncnn.Argument('', null, new ncnn.Tensor(new ncnn.TensorType(dataType, new ncnn.TensorShape(dimensions)), data)) + ])); + } +}; + +ncnn.Attribute = class { + + constructor(schema, key, value) { + this._type = ''; + this._name = key; + this._value = value; + if (schema) { + this._name = schema.name; + if (schema.type) { + this._type = schema.type; + } + switch (this._type) { + case 'int32': + this._value = parseInt(this._value, 10); + break; + case 'float32': + this._value = parseFloat(this._value); + break; + case 'float32[]': + this._value = this._value.map((v) => parseFloat(v)); + break; + } + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (this._value == schema.default || (this._value && this._value.toString() == schema.default.toString())) { + this._visible = false; + } + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +ncnn.Tensor = class { + + constructor(type, data) { + this._type = type; + this._data = data; + } + + get kind() { + return 'Weight'; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state || null; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + + if (this._type.dataType == '?') { + context.state = 'Tensor has unknown data type.'; + return context; + } + if (!this._type.shape) { + context.state = 'Tensor has no dimensions.'; + return context; + } + + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + + switch (this._type.dataType) { + case 'float16': + case 'float32': + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + break; + default: + context.state = 'Tensor data type is not implemented.'; + break; + } + + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + return context; + } + + _decode(context, dimension) { + const shape = context.shape.length !== 0 ? 
context.shape : [ 1 ]; + const results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (this._type.dataType) { + case 'float32': + results.push(context.data.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'float16': + results.push(context.data.getFloat16(context.index, true)); + context.index += 2; + context.count++; + break; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } +}; + +ncnn.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType || '?'; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +ncnn.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? ('[' + this._dimensions.map((dimension) => dimension ? 
dimension.toString() : '?').join(',') + ']') : ''; + } +}; + +ncnn.Metadata = class { + + static open(host) { + if (ncnn.Metadata._metadata) { + return Promise.resolve(ncnn.Metadata._metadata); + } + return host.request(null, 'ncnn-metadata.json', 'utf-8').then((data) => { + ncnn.Metadata._metadata = new ncnn.Metadata(data); + return ncnn.Metadata._metadata; + }).catch(() => { + ncnn.Metadata._metadata = new ncnn.Metadata(null); + return ncnn.Metadata._metadata; + }); + } + + constructor(data) { + this._operatorMap = new Map(); + this._map = new Map(); + this._attributeCache = new Map(); + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map.set(item.name, item.schema); + if (Object.prototype.hasOwnProperty.call(item.schema, 'operator')) { + this._operatorMap.set(item.schema.operator, item.name); + } + } + } + } + } + } + + operator(code) { + return this._operatorMap.get(code); + } + + type(name) { + return this._map.get(name); + } + + attribute(type, name) { + const key = type + ':' + name; + if (!this._attributeCache.has(key)) { + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + this._attributeCache.set(type + ':' + attribute.name, attribute); + } + } + if (!this._attributeCache.has(key)) { + this._attributeCache.set(key, null); + } + } + return this._attributeCache.get(key); + } +}; + +ncnn.BinaryReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + } + + int32() { + const position = this._position; + this._position += 4; + if (this._position > this._buffer.length) { + throw new ncnn.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. 
Unexpected end of file.'); + } + return this._dataView.getInt32(position, true); + } +}; + +ncnn.BlobReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._position = 0; + } + + read(shape, dataType) { + if (this._buffer) { + if (!dataType) { + if (this._buffer && this._position + 4 < this._buffer.length) { + const f0 = this._buffer[this._position++]; + const f1 = this._buffer[this._position++]; + const f2 = this._buffer[this._position++]; + const f3 = this._buffer[this._position++]; + const type = f0 | f1 << 8 | f2 << 16 | f3 << 24; + switch (type) { + case 0x00000000: + dataType = 'float32'; + break; + case 0x01306B47: + dataType = 'float16'; + break; + case 0x000D4B38: + dataType = 'int8'; + break; + case 0x00000001: + dataType = 'qint8'; + break; + case 0x0002C056: // size * sizeof(float) - raw data with extra scaling + default: + throw new ncnn.Error("Unknown weight type '" + type + "'."); + } + } + else { + this._buffer = null; + } + } + let data = null; + let size = 1; + if (shape) { + for (const dimension of shape) { + size *= dimension; + } + } + else { + this._buffer = null; + } + if (this._buffer) { + if (dataType) { + const position = this._position; + switch (dataType) { + case 'float32': + size *= 4; + this._position += size; + data = this._buffer.subarray(position, this._position); + break; + case 'float16': + size *= 2; + this._position += size; + data = this._buffer.subarray(position, this._position); + break; + case 'int8': + this._position += size; + data = this._buffer.subarray(position, this._position); + break; + case 'qint8': + this._position += size + 1024; + data = null; + break; + default: + throw new ncnn.Error("Unknown weight type '" + dataType + "'."); + } + } + } + return { dataType: dataType, data: data }; + } + return null; + } +}; + +ncnn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading ncnn model.'; + } +}; + +if (typeof module !== 'undefined' && typeof 
module.exports === 'object') { + module.exports.ModelFactory = ncnn.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/numpy.js b/frontend/packages/core/public/netron/numpy.js new file mode 100644 index 00000000..81e41ab1 --- /dev/null +++ b/frontend/packages/core/public/netron/numpy.js @@ -0,0 +1,300 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var numpy = numpy || {}; + +numpy.Array = class { + + constructor(buffer) { + if (buffer) { + const reader = new numpy.Reader(buffer); + const signature = [ 0x93, 0x4E, 0x55, 0x4D, 0x50, 0x59 ]; + if (!reader.bytes(6).every((v, i) => v == signature[i])) { + throw new numpy.Error('Invalid signature.'); + } + const major = reader.byte(); + const minor = reader.byte(); + if (major !== 1 && minor !== 0) { + throw new numpy.Error("Invalid version '" + [ major, minor ].join('.') + "'."); + } + const header = JSON.parse(reader.string().trim().replace(/'/g, '"').replace("False", "false").replace("(", "[").replace(/,*\),*/g, "]")); + if (header.fortran_order) { + throw new numpy.Error("Fortran order is not supported.'"); + } + if (!header.descr || header.descr.length < 2) { + throw new numpy.Error("Missing property 'descr'."); + } + if (!header.shape) { + throw new numpy.Error("Missing property 'shape'."); + } + this._shape = header.shape; + this._byteOrder = header.descr[0]; + switch (this._byteOrder) { + case '|': { + this._dataType = header.descr.substring(1); + this._data = reader.bytes(reader.size - reader.position); + break; + } + case '>': + case '<': { + if (header.descr.length !== 3) { + throw new numpy.Error("Unsupported data type '" + header.descr + "'."); + } + this._dataType = header.descr.substring(1); + const size = parseInt(header.descr[2]) * this._shape.reduce((a, b) => a * b); + this._data = reader.bytes(size); + break; + } + default: + throw new numpy.Error("Unsupported data type '" + header.descr + "'."); + } + } + } + + 
get data() { + return this._data; + } + + set data(value) { + this._data = value; + } + + get dataType() { + return this._dataType; + } + + set dataType(value) { + this._dataType = value; + } + + get shape() { + return this._shape; + } + + set shape(value) { + this._shape = value; + } + + get byteOrder() { + return this._byteOrder; + } + + set byteOrder(value) { + this._byteOrder = value; + } + + toBuffer() { + + const writer = new numpy.Writer(); + + writer.bytes([ 0x93, 0x4E, 0x55, 0x4D, 0x50, 0x59 ]); // '\\x93NUMPY' + writer.byte(1); // major + writer.byte(0); // minor + + const context = { + itemSize: 1, + position: 0, + dataType: this._dataType, + byteOrder: this._byteOrder || '<', + shape: this._shape, + descr: '', + }; + + if (context.byteOrder !== '<' && context.byteOrder !== '>') { + throw new numpy.Error("Unknown byte order '" + this._byteOrder + "'."); + } + if (context.dataType.length !== 2 || (context.dataType[0] !== 'f' && context.dataType[0] !== 'i' && context.dataType[0] !== 'u')) { + throw new numpy.Error("Unsupported data type '" + this._dataType + "'."); + } + + context.itemSize = parseInt(context.dataType[1], 10); + + let shape = ''; + switch (this._shape.length) { + case 0: + throw new numpy.Error('Invalid shape.'); + case 1: + shape = '(' + this._shape[0].toString() + ',)'; + break; + default: + shape = '(' + this._shape.map((dimension) => dimension.toString()).join(', ') + ')'; + break; + } + + const properties = [ + "'descr': '" + context.byteOrder + context.dataType + "'", + "'fortran_order': False", + "'shape': " + shape + ]; + let header = '{ ' + properties.join(', ') + ' }'; + header += ' '.repeat(16 - ((header.length + 2 + 8 + 1) & 0x0f)) + '\n'; + writer.string(header); + + const size = context.itemSize * this._shape.reduce((a, b) => a * b); + context.data = new Uint8Array(size); + context.dataView = new DataView(context.data.buffer, context.data.byteOffset, size); + numpy.Array._encodeDimension(context, this._data, 0); + 
writer.bytes(context.data); + + return writer.toBuffer(); + } + + static _encodeDimension(context, data, dimension) { + const size = context.shape[dimension]; + const littleEndian = context.byteOrder === '<'; + if (dimension == context.shape.length - 1) { + for (let i = 0; i < size; i++) { + switch (context.dataType) { + case 'f2': + context.dataView.setFloat16(context.position, data[i], littleEndian); + break; + case 'f4': + context.dataView.setFloat32(context.position, data[i], littleEndian); + break; + case 'f8': + context.dataView.setFloat64(context.position, data[i], littleEndian); + break; + case 'i1': + context.dataView.setInt8(context.position, data[i], littleEndian); + break; + case 'i2': + context.dataView.setInt16(context.position, data[i], littleEndian); + break; + case 'i4': + context.dataView.setInt32(context.position, data[i], littleEndian); + break; + case 'i8': + context.data.set(data[i].toBytes(littleEndian), context.position); + break; + case 'u1': + context.dataView.setUint8(context.position, data[i], littleEndian); + break; + case 'u2': + context.dataView.setUint16(context.position, data[i], littleEndian); + break; + case 'u4': + context.dataView.setUint32(context.position, data[i], littleEndian); + break; + case 'u8': + context.data.set(data[i].toBytes(littleEndian), context.position); + break; + } + context.position += context.itemSize; + } + } + else { + for (let j = 0; j < size; j++) { + numpy.Array._encodeDimension(context, data[j], dimension + 1); + } + } + } +}; + +numpy.Reader = class { + + constructor(buffer) { + this._buffer = buffer; + this._position = 0; + } + + get position() { + return this._position; + } + + get size() { + return this._buffer.length; + } + + byte() { + return this._buffer[this._position++]; + } + + bytes(size) { + const value = this._buffer.slice(this._position, this._position + size); + this._position += size; + return value; + } + + uint16() { + return this.byte() | (this.byte() << 8); + } + + string() { + 
const size = this.uint16(); + let value = ''; + for (let i = 0; i < size; i++) { + value += String.fromCharCode(this.byte()); + } + return value; + } +}; + +numpy.Writer = class { + + constructor() { + this._length = 0; + this._head = null; + this._tail = null; + } + + byte(value) { + this.bytes([ value ]); + } + + uint16(value) { + this.bytes([ value & 0xff, (value >> 8) & 0xff ]); + } + + bytes(values) { + const array = new Uint8Array(values.length); + for (let i = 0; i < values.length; i++) { + array[i] = values[i]; + } + this._write(array); + } + + string(value) { + this.uint16(value.length); + const array = new Uint8Array(value.length); + for (let i = 0; i < value.length; i++) { + array[i] = value.charCodeAt(i); + } + this._write(array); + } + + _write(array) { + const node = { buffer: array, next: null }; + if (this._tail) { + this._tail.next = node; + } + else { + this._head = node; + } + this._tail = node; + this._length += node.buffer.length; + } + + toBuffer() { + const array = new Uint8Array(this._length); + let position = 0; + let head = this._head; + while (head != null) { + array.set(head.buffer, position); + position += head.buffer.length; + head = head.next; + } + return array; + } +}; + +numpy.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'NumPy Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Array = numpy.Array; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/onnx-metadata.json b/frontend/packages/core/public/netron/onnx-metadata.json new file mode 100644 index 00000000..ca39ab62 --- /dev/null +++ b/frontend/packages/core/public/netron/onnx-metadata.json @@ -0,0 +1,25014 @@ +[ + { + "name": "Abs", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Absolute takes one input data 
(Tensor) and produces one output data\n(Tensor) where the absolute is, y = abs(x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Abs',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = abs(x)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_abs')", + "summary": "abs" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Abs", + "schema": { + "description": "Absolute takes one input data (Tensor) and produces one output data\n(Tensor) where the absolute is, y = abs(x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Abs',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = abs(x)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_abs')", + "summary": "abs" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" 
+ ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Acos", + "schema": { + "description": "Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Acos',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-0.5, 0, 0.5]).astype(np.float32)\ny = np.arccos(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_acos_example')\n\nx = np.random.rand(3, 4, 5).astype(np.float32)\ny = np.arccos(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_acos')", + "summary": "acos" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The arccosine of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Acosh", + "schema": { + "description": "Calculates the hyperbolic arccosine of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Acosh',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([10, np.e, 1]).astype(np.float32)\ny = np.arccosh(x) # expected output [2.99322295, 1.65745449, 0.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_acosh_example')\n\nx = np.random.uniform(1.0, 10.0, (3, 4, 5)).astype(np.float32)\ny = np.arccosh(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_acosh')", + "summary": "acosh" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + 
"max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The hyperbolic arccosine values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Adagrad", + "schema": { + "attributes": [ + { + "description": "The decay factor of learning rate after one update.The effective learning rate is computed by r = R / (1 + T * decay_factor). Default to 0 so that increasing update counts doesn't reduce the learning rate.", + "name": "decay_factor", + "required": false, + "type": "float32" + }, + { + "default": 9.999999974752427e-07, + "description": "Small scalar to avoid dividing by zero.", + "name": "epsilon", + "required": false, + "type": "float32" + }, + { + "description": "Regularization coefficient in 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization.", + "name": "norm_coefficient", + "required": false, + "type": "float32" + } + ], + "description": "Compute one iteration of ADAGRAD, a stochastic gradient based optimization\n algorithm. This operator can conduct the optimization of multiple tensor variables.\n\n Let's define the behavior of this operator. As you can imagine, ADAGRAD requires\n some parameters:\n \n - The initial learning-rate \"R\".\n - The update count \"T\". That is, the number of training iterations conducted.\n - A L2-norm regularization coefficient \"norm_coefficient\".\n - A learning-rate decay factor \"decay_factor\".\n - A small constant \"epsilon\" to avoid dividing-by-zero. \n\n At each ADAGRAD iteration, the optimized tensors are moved along a direction\n computed based on their estimated gradient and accumulated squared gradient. 
Assume\n that only a single tensor \"X\" is updated by this operator. We need the value of \"X\",\n its gradient \"G\", and its accumulated squared gradient \"H\". Therefore, variables in\n this operator's input list are sequentially \"R\", \"T\", \"X\", \"G\", and \"H\". Other\n parameters are given as attributes because they are usually constants. Also, the\n corresponding output tensors are the new value of \"X\" (called \"X_new\"), and then\n the new accumulated squared gradient (called \"H_new\"). Those outputs are computed\n from the given inputs following the pseudo code below.\n\n Let \"+\", \"-\", \"*\", and \"/\" are all element-wise arithmetic operations with\n numpy-style broadcasting support. The pseudo code to compute those outputs is:\n\n // Compute a scalar learning-rate factor. At the first update of X, T is generally\n // 0 (0-based update index) or 1 (1-based update index).\n r = R / (1 + T * decay_factor);\n\n // Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.\n G_regularized = norm_coefficient * X + G;\n\n // Compute new accumulated squared gradient.\n H_new = H + G_regularized * G_regularized;\n\n // Compute the adaptive part of per-coordinate learning rate. Note that Sqrt(...)\n // computes element-wise square-root.\n H_adaptive = Sqrt(H_new) + epsilon\n\n // Compute the new value of \"X\".\n X_new = X - r * G_regularized / H_adaptive;\n\n If one assign this operators to optimize multiple inputs, for example, \"X_1\" and \"X_2\", the same\n pseudo code may be extended to handle all tensors jointly. 
More specifically, we can view \"X\" as a\n concatenation of \"X_1\" and \"X_2\" (of course, their gradient and accumulate gradient should\n be concatenated too) and then just reuse the entire pseudo code.\n\n Note that ADAGRAD was first proposed in http://jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.\n In that reference paper, this operator is a special case of the Figure 1's composite mirror\n descent update.\n", + "domain": "ai.onnx.preview.training", + "examples": [ + { + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nepsilon = 1e-5\ndecay_factor = 0.1\n\n# Create operator.\nnode = onnx.helper.make_node('Adagrad',\n inputs=['R', 'T', 'X', 'G', 'H'],\n outputs=['X_new', 'H_new'],\n norm_coefficient=norm_coefficient,\n epsilon=epsilon,\n decay_factor=decay_factor,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN\n )\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.0], dtype=np.float32)\ng = np.array([-1.0], dtype=np.float32)\nh = np.array([2.0], dtype=np.float32)\n\n# Compute expected outputs of Adagrad.\nx_new, h_new = apply_adagrad(r, t, x, g, h,\n norm_coefficient, epsilon, decay_factor)\n\n# Check results.\nexpect(node, inputs=[r, t, x, g, h],\n outputs=[x_new, h_new], name='test_adagrad',\n opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])", + "summary": "adagrad" + }, + { + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nepsilon = 1e-5\ndecay_factor = 0.1\n\nnode = onnx.helper.make_node('Adagrad',\n inputs=['R', 'T', 'X1', 'X2',\n 'G1', 'G2', 'H1', 'H2'],\n outputs=['X1_new', 'X2_new',\n 'H1_new', 'H2_new'],\n norm_coefficient=norm_coefficient,\n epsilon=epsilon,\n decay_factor=decay_factor,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN\n )\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\n\nx1 = np.array([1.0], dtype=np.float32)\ng1 = np.array([-1.0], 
dtype=np.float32)\nh1 = np.array([2.0], dtype=np.float32)\n\nx2 = np.array([1.0, 2.0], dtype=np.float32)\ng2 = np.array([-1.0, -3.0], dtype=np.float32)\nh2 = np.array([4.0, 1.0], dtype=np.float32)\n\n# Compute expected outputs of Adagrad.\nx1_new, h1_new = apply_adagrad(r, t, x1, g1, h1,\n norm_coefficient, epsilon, decay_factor)\nx2_new, h2_new = apply_adagrad(r, t, x2, g2, h2,\n norm_coefficient, epsilon, decay_factor)\n\n# Check results.\nexpect(node, inputs=[r, t, x1, x2, g1, g2, h1, h2],\n outputs=[x1_new, x2_new, h1_new, h2_new], name='test_adagrad_multiple',\n opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])", + "summary": "adagrad_multiple" + } + ], + "inputs": [ + { + "description": "The initial learning rate.", + "name": "R", + "type": "T1" + }, + { + "description": "The update count of \"X\". It should be a scalar.", + "name": "T", + "type": "T2" + }, + { + "description": "The current values of optimized tensors, followed by their respective gradients, followed by their respective accumulated squared gradients.For example, if two tensor \"X_1\" and \"X_2\" are optimized, The input list would be [\"X_1\", \"X_2\", gradient of \"X_1\", gradient of \"X_2\", accumulated squared gradient of \"X_1\", accumulated squared gradient of \"X_2\"].", + "name": "inputs", + "option": "variadic", + "type": "T3" + } + ], + "inputs_range": "3 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Updated values of optimized tensors, followed by their updated values of accumulated squared gradients. 
For example, if two tensor \"X_1\" and \"X_2\" are optimized, the output list would be [new value of \"X_1,\" new value of \"X_2\" new accumulated squared gradient of \"X_1\", new accumulated squared gradient of \"X_2\"].", + "name": "outputs", + "option": "variadic", + "type": "T3" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float scalars.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain input types to 64-bit integer scalars.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "Adam", + "schema": { + "attributes": [ + { + "default": 0.8999999761581421, + "description": "Coefficient of previously accumulated gradient in running average. Default to 0.9.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 0.9990000128746033, + "description": "Coefficient of previously accumulated squared-gradient in running average. Default to 0.999.", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "default": 9.999999974752427e-07, + "description": "Small scalar to avoid dividing by zero.", + "name": "epsilon", + "required": false, + "type": "float32" + }, + { + "description": "Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. Default to 0, which means no regularization.", + "name": "norm_coefficient", + "required": false, + "type": "float32" + }, + { + "description": "Regularization coefficient of 0.5 * norm_coefficient * ||X||_2^2. 
Default to 0, which means no regularization.", + "name": "norm_coefficient_post", + "required": false, + "type": "float32" + } + ], + "description": "Compute one iteration of Adam, a stochastic gradient based optimization\n algorithm. This operator can conduct the optimization of multiple tensor variables.\n\n Let's define the behavior of this operator. First of all, Adam requires\n some parameters:\n \n - The learning-rate \"R\".\n - The update count \"T\". That is, the number of training iterations conducted.\n - A L2-norm regularization coefficient \"norm_coefficient\".\n - A small constant \"epsilon\" to avoid dividing-by-zero. \n - Two coefficients, \"alpha\" and \"beta\".\n\n At each Adam iteration, the optimized tensors are moved along a direction\n computed based on their exponentially-averaged historical gradient and\n exponentially-averaged historical squared gradient. Assume that only a tensor\n \"X\" is being optimized. The rest of required information is\n \n - the value of \"X\",\n - \"X\"'s gradient (denoted by \"G\"),\n - \"X\"'s exponentially-averaged historical gradient (denoted by \"V\"), and\n - \"X\"'s exponentially-averaged historical squared gradient (denoted by \"H\").\n\n Some of those parameters are passed into this operator as input tensors and others\n are stored as this operator's attributes. Specifically, this operator's input tensor\n list is [\"R\", \"T\", \"X\", \"G\", \"V\", \"H\"]. That is, \"R\" is the first input, \"T\" is\n the second input, and so on. Other parameters are given as attributes because they\n are constants. 
Moreover, the corresponding output tensors are \n \n - the new value of \"X\" (called \"X_new\"),\n - the new exponentially-averaged historical gradient (denoted by \"V_new\"), and\n - the new exponentially-averaged historical squared gradient (denoted by \"H_new\").\n\n Those outputs are computed following the pseudo code below.\n\n Let \"+\", \"-\", \"*\", and \"/\" are all element-wise arithmetic operations with\n numpy-style broadcasting support. The pseudo code to compute those outputs is:\n\n // Add gradient of 0.5 * norm_coefficient * ||X||_2^2, where ||X||_2 is the 2-norm.\n G_regularized = norm_coefficient * X + G\n\n // Update exponentially-averaged historical gradient.\n V_new = alpha * V + (1 - alpha) * G_regularized\n\n // Update exponentially-averaged historical squared gradient.\n H_new = beta * H + (1 - beta) * G_regularized * G_regularized\n\n // Compute the element-wise square-root of H_new. V_new will be element-wisely\n // divided by H_sqrt for a better update direction.\n H_sqrt = Sqrt(H_new) + epsilon\n\n // Compute learning-rate. Note that \"alpha**T\"/\"beta**T\" is alpha's/beta's T-th power.\n R_adjusted = T > 0 ? 
R * Sqrt(1 - beta**T) / (1 - alpha**T) : R\n\n // Compute new value of \"X\".\n X_new = X - R_adjusted * V_new / H_sqrt\n\n // Post-update regularization.\n X_final = (1 - norm_coefficient_post) * X_new \n\n If there are multiple inputs to be optimized, the pseudo code will be applied\n independently to each of them.\n", + "domain": "ai.onnx.preview.training", + "examples": [ + { + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.1\nepsilon = 1e-7\n\n# Create operator.\nnode = onnx.helper.make_node('Adam',\n inputs=['R', 'T', 'X', 'G', 'V', 'H'],\n outputs=['X_new', 'V_new', 'H_new'],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n epsilon=epsilon,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN\n )\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.2, 2.8], dtype=np.float32)\ng = np.array([-0.94, -2.5], dtype=np.float32)\nv = np.array([1.7, 3.6], dtype=np.float32)\nh = np.array([0.1, 0.1], dtype=np.float32)\n\n# Compute expected outputs of Adam.\nx_new, v_new, h_new = apply_adam(r, t, x, g, v, h,\n norm_coefficient, 0.0, alpha, beta,\n epsilon)\n\n# Check results.\nexpect(node, inputs=[r, t, x, g, v, h],\n outputs=[x_new, v_new, h_new], name='test_adam',\n opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])", + "summary": "adam" + }, + { + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.85\nepsilon = 1e-2\n\nnode = onnx.helper.make_node('Adam',\n inputs=['R', 'T', 'X1', 'X2',\n 'G1', 'G2', 'V1', 'V2',\n 'H1', 'H2'],\n outputs=['X1_new', 'X2_new',\n 'V1_new', 'V2_new',\n 'H1_new', 'H2_new'],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN\n )\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\n\nx1 = np.array([1.0], dtype=np.float32)\ng1 = 
np.array([-1.0], dtype=np.float32)\nv1 = np.array([2.0], dtype=np.float32)\nh1 = np.array([0.5], dtype=np.float32)\n\nx2 = np.array([1.0, 2.0], dtype=np.float32)\ng2 = np.array([-1.0, -3.0], dtype=np.float32)\nv2 = np.array([4.0, 1.0], dtype=np.float32)\nh2 = np.array([1.0, 10.0], dtype=np.float32)\n\n# Compute expected outputs of Adam.\nx1_new, v1_new, h1_new = apply_adam(r, t, x1, g1, v1, h1,\n norm_coefficient, 0.0, alpha, beta,\n epsilon)\nx2_new, v2_new, h2_new = apply_adam(r, t, x2, g2, v2, h2,\n norm_coefficient, 0.0, alpha, beta,\n epsilon)\n\n# Check results.\nexpect(node, inputs=[r, t, x1, x2, g1, g2, v1, v2, h1, h2],\n outputs=[x1_new, x2_new, v1_new, v2_new, h1_new, h2_new],\n name='test_adam_multiple',\n opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])", + "summary": "adam_multiple" + } + ], + "inputs": [ + { + "description": "The initial learning rate.", + "name": "R", + "type": "T1" + }, + { + "description": "The update count of \"X\". It should be a scalar.", + "name": "T", + "type": "T2" + }, + { + "description": "The tensors to be optimized, followed by their respective gradients, followed by their respective accumulated gradients (aka momentum), followed by their respective accumulated squared gradients. For example, to optimize tensors \"X_1\" and \"X_2,\", the input list would be [\"X_1\", \"X_2\", gradient of \"X_1\", gradient of \"X_2\", accumulated gradient of \"X_1\", accumulated gradient of \"X_2\", accumulated squared gradient of \"X_1\", accumulated squared gradient of \"X_2\"].", + "name": "inputs", + "option": "variadic", + "type": "T3" + } + ], + "inputs_range": "3 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "New values of optimized tensors, followed by their respective new accumulated gradients, followed by their respective new accumulated squared gradients. 
For example, if two tensors \"X_1\" and \"X_2\" are optimized, the outputs list would be [new value of \"X_1\", new value of \"X_2\", new accumulated gradient of \"X_1\", new accumulated gradient of \"X_2\", new accumulated squared gradient of \"X_1\", new accumulated squared gradient of \"X_2\"].", + "name": "outputs", + "option": "variadic", + "type": "T3" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float scalars.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain input types to 64-bit integer scalars.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "Add", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Performs element-wise binary addition (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. 
The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Add',\n inputs=['x', 'y'],\n outputs=['sum'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y],\n name='test_add')", + "summary": "add" + }, + { + "code": "node = onnx.helper.make_node(\n 'Add',\n inputs=['x', 'y'],\n outputs=['sum'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y],\n name='test_add_bcast')", + "summary": "add_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Add", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "description": "Performs element-wise binary addition (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. 
B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Add',\n inputs=['x', 'y'],\n outputs=['sum'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y],\n name='test_add')", + "summary": "add" + }, + { + "code": "node = onnx.helper.make_node(\n 'Add',\n inputs=['x', 'y'],\n outputs=['sum'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y],\n name='test_add_bcast')", + "summary": "add_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Add", + "schema": { + "description": "Performs element-wise binary addition (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Add',\n inputs=['x', 'y'],\n outputs=['sum'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y],\n name='test_add')", + "summary": "add" + }, + { + "code": "node = onnx.helper.make_node(\n 'Add',\n inputs=['x', 'y'],\n outputs=['sum'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nexpect(node, inputs=[x, y], outputs=[x + y],\n name='test_add_bcast')", + "summary": "add_broadcast" + } + ], + "inputs": [ + { + "description": "First operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same element type as two inputs", + "name": "C", + "type": "T" + } + ], + "since_version": 7, 
+ "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "And", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "category": "Logic", + "description": "Returns the tensor resulted from performing the `and` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'And',\n inputs=['x', 'y'],\n outputs=['and'],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(np.bool)\ny = (np.random.randn(3, 4) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and2d')\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and3d')\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and4d')", + "summary": "and" + }, + { + "code": "node = onnx.helper.make_node(\n 'And',\n inputs=['x', 'y'],\n outputs=['and'],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(5) > 0).astype(np.bool)\nz = 
np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast3v1d')\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(4, 5) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast3v2d')\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(5, 6) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast4v2d')\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast4v3d')\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast4v4d')", + "summary": "and_broadcast" + } + ], + "inputs": [ + { + "description": "Left input tensor for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Right input tensor for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains input to boolean tensor.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "And", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `and` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis 
operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'And',\n inputs=['x', 'y'],\n outputs=['and'],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(np.bool)\ny = (np.random.randn(3, 4) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and2d')\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and3d')\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and4d')", + "summary": "and" + }, + { + "code": "node = onnx.helper.make_node(\n 'And',\n inputs=['x', 'y'],\n outputs=['and'],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(5) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast3v1d')\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(4, 5) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast3v2d')\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(5, 6) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast4v2d')\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast4v3d')\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 1, 5, 6) 
> 0).astype(np.bool)\nz = np.logical_and(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_and_bcast4v4d')", + "summary": "and_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains input to boolean tensor.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "ArgMax", + "schema": { + "attributes": [ + { + "description": "The axis in which to compute the arg indices.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the indices of the max elements of the input tensor's element along the\nprovided axis. 
The resulted tensor has the same rank as the input if keepdims equal 1.\nIf keepdims equal 0, then the resulted tensor have the reduced dimension pruned.\nThe type of the output tensor is integer.", + "domain": "ai.onnx", + "examples": [ + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims)\n\n# result: [[1], [1]]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims,\n select_last_index=True)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_random_select_last_index')", + "summary": "default_axes_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_example')\n\ndata 
= np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_random_select_last_index')", + "summary": "keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_random')", + "summary": "negative_axis_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [1]]\nresult = 
argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_random_select_last_index')", + "summary": "negative_axis_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0, 1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_random')", + "summary": "no_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_random_select_last_index')", + "summary": "no_keepdims_select_last_index" + } + ], + "inputs": [ + { + 
"description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor with integer data type.", + "name": "reduced", + "type": "tensor(int64)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ArgMax", + "schema": { + "attributes": [ + { + "description": "The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the indices of the max elements of the input tensor's element along the \nprovided axis. The resulting tensor has the same rank as the input if keepdims equal 1. \nIf keepdims equal 0, then the resulting tensor have the reduced dimension pruned. 
\nThe type of the output tensor is integer.", + "domain": "ai.onnx", + "examples": [ + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims)\n\n# result: [[1], [1]]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims,\n select_last_index=True)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_random_select_last_index')", + "summary": "default_axes_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy(data, axis=axis, 
keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_random_select_last_index')", + "summary": "keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_random')", + "summary": "negative_axis_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], 
name='test_argmax_negative_axis_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_random_select_last_index')", + "summary": "negative_axis_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0, 1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_random')", + "summary": "no_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_random_select_last_index')", + "summary": "no_keepdims_select_last_index" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + 
"min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor with integer data type.", + "name": "reduced", + "type": "tensor(int64)" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ArgMax", + "schema": { + "attributes": [ + { + "description": "The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + }, + { + "description": "Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index).", + "name": "select_last_index", + "required": false, + "type": "int64" + } + ], + "description": "Computes the indices of the max elements of the input tensor's element along the \nprovided axis. The resulting tensor has the same rank as the input if keepdims equal 1. \nIf keepdims equal 0, then the resulting tensor have the reduced dimension pruned. \nIf select_last_index is True (default False), the index of the last occurrence of the max \nis selected if the max appears more than once in the input. 
Otherwise the index of the \nfirst occurrence is selected.\nThe type of the output tensor is integer.", + "domain": "ai.onnx", + "examples": [ + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims)\n\n# result: [[1], [1]]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims,\n select_last_index=True)\n\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmax_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_default_axis_random_select_last_index')", + "summary": "default_axes_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult 
= argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_keepdims_random_select_last_index')", + "summary": "keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0], [1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_random')", + "summary": "negative_axis_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, 
inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_negative_axis_keepdims_random_select_last_index')", + "summary": "negative_axis_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# result: [[0, 1]]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_random')", + "summary": "no_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMax',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1, 1]]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmax_no_keepdims_random_select_last_index')", + "summary": "no_keepdims_select_last_index" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 
1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor with integer data type.", + "name": "reduced", + "type": "tensor(int64)" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ArgMin", + "schema": { + "attributes": [ + { + "description": "The axis in which to compute the arg indices.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the indices of the min elements of the input tensor's element along the\nprovided axis. 
The resulted tensor has the same rank as the input if keepdims equal 1.\nIf keepdims equal 0, then the resulted tensor have the reduced dimension pruned.\nThe type of the output tensor is integer.", + "domain": "ai.onnx", + "examples": [ + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims)\n\n# The content of result is : [[0], [0]]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims,\n select_last_index=True)\n\n# result: [[0, 0]]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random_select_last_index')", + "summary": "default_axes_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], 
name='test_argmin_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random_select_last_index')", + "summary": "keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random')", + "summary": "negative_axis_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n 
keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random_select_last_index')", + "summary": "negative_axis_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1, 0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_random')", + "summary": "no_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1, 0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], 
name='test_argmin_no_keepdims_random_select_last_index')", + "summary": "no_keepdims_select_last_index" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor with integer data type.", + "name": "reduced", + "type": "tensor(int64)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ArgMin", + "schema": { + "attributes": [ + { + "description": "The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the indices of the min elements of the input tensor's element along the \nprovided axis. The resulting tensor has the same rank as the input if keepdims equal 1. \nIf keepdims equal 0, then the resulting tensor have the reduced dimension pruned. 
\nThe type of the output tensor is integer.", + "domain": "ai.onnx", + "examples": [ + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims)\n\n# The content of result is : [[0], [0]]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims,\n select_last_index=True)\n\n# result: [[0, 0]]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random_select_last_index')", + "summary": "default_axes_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = 
argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random_select_last_index')", + "summary": "keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random')", + "summary": "negative_axis_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, 
keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random_select_last_index')", + "summary": "negative_axis_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1, 0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_random')", + "summary": "no_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1, 0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_random_select_last_index')", + "summary": "no_keepdims_select_last_index" + } + ], + "inputs": [ + { + "description": "An input tensor.", + 
"name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor with integer data type.", + "name": "reduced", + "type": "tensor(int64)" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ArgMin", + "schema": { + "attributes": [ + { + "description": "The axis in which to compute the arg indices. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + }, + { + "description": "Whether to select the last index or the first index if the {name} appears in multiple indices, default is False (first index).", + "name": "select_last_index", + "required": false, + "type": "int64" + } + ], + "description": "Computes the indices of the min elements of the input tensor's element along the \nprovided axis. The resulting tensor has the same rank as the input if keepdims equal 1. \nIf keepdims equal 0, then the resulting tensor have the reduced dimension pruned. \nIf select_last_index is True (default False), the index of the last occurrence of the min \nis selected if the min appears more than once in the input. 
Otherwise the index of the \nfirst occurrence is selected.\nThe type of the output tensor is integer.", + "domain": "ai.onnx", + "examples": [ + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims)\n\n# The content of result is : [[0], [0]]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n keepdims=keepdims,\n select_last_index=True)\n\n# result: [[0, 0]]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [1, 3, 4]\nresult = argmin_use_numpy_select_last_index(data, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random_select_last_index')", + "summary": "default_axes_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 
4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 1, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random_select_last_index')", + "summary": "keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1], [0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random')", + "summary": "negative_axis_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = -1\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1], [0]]\nresult = 
argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 3, 1]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random_select_last_index')", + "summary": "negative_axis_keepdims_select_last_index" + }, + { + "code": "data = np.array([[2, 1], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims)\n# The content of result is : [[1, 0]]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_random')", + "summary": "no_keepdims" + }, + { + "code": "data = np.array([[2, 2], [3, 10]], dtype=np.float32)\naxis = 1\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ArgMin',\n inputs=['data'],\n outputs=['result'],\n axis=axis,\n keepdims=keepdims,\n select_last_index=True)\n# result: [[1, 0]]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example_select_last_index')\n\ndata = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)\n# result's shape: [2, 4]\nresult = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)\nexpect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_random_select_last_index')", + "summary": "no_keepdims_select_last_index" + } + ], + 
"inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor with integer data type.", + "name": "reduced", + "type": "tensor(int64)" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ArrayFeatureExtractor", + "schema": { + "description": "Select elements of the input tensor based on the indices passed.
    \n The indices are applied to the last axes of the tensor.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be selected", + "name": "X", + "type": "T" + }, + { + "description": "The indices, based on 0 as the first index of any dimension.", + "name": "Y", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Selected output data as an array", + "name": "Z", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)", + "tensor(string)" + ], + "description": "The input must be a tensor of a numeric type or string. The output will be of the same tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Asin", + "schema": { + "description": "Calculates the arcsine (inverse of sine) of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Asin',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-0.5, 0, 0.5]).astype(np.float32)\ny = np.arcsin(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_asin_example')\n\nx = np.random.rand(3, 4, 5).astype(np.float32)\ny = np.arcsin(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_asin')", + "summary": "asin" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The arcsine of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" 
+ } + ] + } + }, + { + "name": "Asinh", + "schema": { + "description": "Calculates the hyperbolic arcsine of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Asinh',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.arcsinh(x) # expected output [-0.88137358, 0., 0.88137358]\nexpect(node, inputs=[x], outputs=[y],\n name='test_asinh_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.arcsinh(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_asinh')", + "summary": "asinh" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The hyperbolic arcsine values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Atan", + "schema": { + "description": "Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Atan',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.arctan(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_atan_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.arctan(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_atan')", + "summary": "atan" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": 
"The arctangent of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Atanh", + "schema": { + "description": "Calculates the hyperbolic arctangent of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Atanh',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-0.5, 0, 0.5]).astype(np.float32)\ny = np.arctanh(x) # expected output [-0.54930615, 0., 0.54930615]\nexpect(node, inputs=[x], outputs=[y],\n name='test_atanh_example')\n\nx = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32)\ny = np.arctanh(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_atanh')", + "summary": "atanh" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The hyperbolic arctangent values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "AveragePool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. 
SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements exclude pad.\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_1d_default')", + "summary": "averagepool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [6, 7.5],\n [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_ceil')", + "summary": "averagepool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 
32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_default')", + "summary": "averagepool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads')", + "summary": "averagepool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n 
constant_values=0)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG', count_include_pad=1)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads_count_include_pad')", + "summary": "averagepool_2d_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads')", + "summary": "averagepool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 6.8400]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads_count_include_pad')", + "summary": "averagepool_2d_precomputed_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by 
axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 5.5, 7],\n [11.5, 13, 14.5],\n [19, 20.5, 22]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_same_upper')", + "summary": "averagepool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 6],\n [14, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_strides')", + "summary": "averagepool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, 
pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_lower')", + "summary": "averagepool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_upper')", + "summary": "averagepool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_strides')", + "summary": "averagepool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = 
np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_3d_default')", + "summary": "averagepool_3d_default" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "AveragePool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. 
SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "Whether include pad pixels when calculating values for the edges. Default is 0, doesn't count include pad.", + "name": "count_include_pad", + "required": false, + "type": "int64" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. 
The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_1d_default')", + "summary": "averagepool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [6, 7.5],\n [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], 
name='test_averagepool_2d_ceil')", + "summary": "averagepool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_default')", + "summary": "averagepool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads')", + "summary": "averagepool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 
2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=0)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG', count_include_pad=1)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads_count_include_pad')", + "summary": "averagepool_2d_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads')", + "summary": "averagepool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 
6.8400]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads_count_include_pad')", + "summary": "averagepool_2d_precomputed_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 5.5, 7],\n [11.5, 13, 14.5],\n [19, 20.5, 22]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_same_upper')", + "summary": "averagepool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 6],\n [14, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_strides')", + "summary": "averagepool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, 
out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_lower')", + "summary": "averagepool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_upper')", + "summary": "averagepool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 
'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_strides')", + "summary": "averagepool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_3d_default')", + "summary": "averagepool_3d_default" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. 
Floor value of the dimension is used", + "name": "Y", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "AveragePool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "Whether to use ceil or floor (default) to compute the output shape.", + "name": "ceil_mode", + "required": false, + "type": "int64" + }, + { + "description": "Whether include pad pixels when calculating values for the edges. Default is 0, doesn't count include pad.", + "name": "count_include_pad", + "required": false, + "type": "int64" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. 
If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_1d_default')", + "summary": "averagepool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [6, 7.5],\n [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_ceil')", + "summary": "averagepool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = 
np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_default')", + "summary": "averagepool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads')", + "summary": "averagepool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, 
pad_right)), mode='constant',\n constant_values=0)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG', count_include_pad=1)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads_count_include_pad')", + "summary": "averagepool_2d_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads')", + "summary": "averagepool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 6.8400]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads_count_include_pad')", + "summary": "averagepool_2d_precomputed_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> 
[1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 5.5, 7],\n [11.5, 13, 14.5],\n [19, 20.5, 22]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_same_upper')", + "summary": "averagepool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 6],\n [14, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_strides')", + "summary": "averagepool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, 
strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_lower')", + "summary": "averagepool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_upper')", + "summary": "averagepool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_strides')", + "summary": "averagepool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 
2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_3d_default')", + "summary": "averagepool_3d_default" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used", + "name": "Y", + "type": "T" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "AveragePool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. 
SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "Whether to use ceil or floor (default) to compute the output shape.", + "name": "ceil_mode", + "required": false, + "type": "int64" + }, + { + "description": "Whether include pad pixels when calculating values for the edges. Default is 0, doesn't count include pad.", + "name": "count_include_pad", + "required": false, + "type": "int64" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis. 
If not present, the stride defaults to 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "AveragePool consumes an input tensor X and applies average pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n average pooling consisting of computing the average on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is divided by the number of elements (exclude pad when attribute count_include_pad is zero).\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = 
get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_1d_default')", + "summary": "averagepool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [6, 7.5],\n [12, 13.5]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_ceil')", + "summary": "averagepool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_default')", + "summary": "averagepool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, 
((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads')", + "summary": "averagepool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2],\n count_include_pad=1,\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = 2\npad_top = 2\npad_right = 2\npad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=0)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG', count_include_pad=1)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_pads_count_include_pad')", + "summary": "averagepool_2d_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 7.5, 8, 8.5, 9],\n [9.5, 10, 10.5, 11, 11.5],\n [12, 12.5, 13, 13.5, 14],\n [14.5, 15, 15.5, 16, 16.5],\n [17, 17.5, 18, 18.5, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads')", + "summary": 
"averagepool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2],\n count_include_pad=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[2.5200, 3.6000, 4.8000, 4.0800, 3.2400],\n [4.5600, 6.4000, 8.4000, 7.0400, 5.5200],\n [7.2000, 10.0000, 13.0000, 10.8000, 8.4000],\n [6.9600, 9.6000, 12.4000, 10.2400, 7.9200],\n [6.1200, 8.4000, 10.8000, 8.8800, 6.8400]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_pads_count_include_pad')", + "summary": "averagepool_2d_precomputed_pads_count_include_pad" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 5.5, 7],\n [11.5, 13, 14.5],\n [19, 20.5, 22]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_same_upper')", + "summary": "averagepool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[4, 6],\n [14, 
16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_precomputed_strides')", + "summary": "averagepool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_lower')", + "summary": "averagepool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n 
constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_same_upper')", + "summary": "averagepool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_2d_strides')", + "summary": "averagepool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'AveragePool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'AVG')\n\nexpect(node, inputs=[x], outputs=[y], name='test_averagepool_3d_default')", + "summary": "averagepool_3d_default" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. 
Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used", + "name": "Y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "BatchNormalization", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": true, + "type": "int64[]" + }, + { + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero, default is 1e-5f.", + "name": "epsilon", + "required": false, + "type": "float32" + }, + { + "description": "If set to nonzero, run spatial batch normalization in test mode, default is 0.", + "name": "is_test", + "required": false, + "type": "int64" + }, + { + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f.", + "name": "momentum", + "required": false, + "type": "float32" + }, + { + "default": 1, + "description": "If true, compute the mean and variance across all spatial elements If false, compute the mean and variance across per feature.Default is 1.", + "name": "spatial", + "required": false, + "type": "int64" + } + ], + "category": "Normalization", + "description": "Carries out batch 
normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. Depending on the mode it is being run,\nthere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, mean, var, saved_mean, saved_var (training mode)\nOutput case #2: Y (test mode)\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n mean = mean.reshape(-1, *dim_ones)\n var = var.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: (1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\nmean = np.array([0, 3]).astype(np.float32)\nvar = np.array([1, 1.5]).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_example')\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_epsilon')", + "summary": "batchnormalization" + } + ], + "inputs": [ + { + "description": "The input 4-dimensional tensor of 
shape NCHW.", + "name": "X", + "type": "T" + }, + { + "description": "The scale as a 1-dimensional tensor of size C to be applied to the output.", + "name": "scale", + "type": "T" + }, + { + "description": "The bias as a 1-dimensional tensor of size C to be applied to the output.", + "name": "B", + "type": "T" + }, + { + "description": "The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C.", + "name": "mean", + "type": "T" + }, + { + "description": "The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C.", + "name": "var", + "type": "T" + } + ], + "max_input": 5, + "max_output": 5, + "min_input": 5, + "min_output": 1, + "outputs": [ + { + "description": "The output 4-dimensional tensor of the same shape as X.", + "name": "Y", + "type": "T" + }, + { + "description": "The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing.", + "name": "mean", + "option": "optional", + "type": "T" + }, + { + "description": "The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing.", + "name": "var", + "option": "optional", + "type": "T" + }, + { + "description": "Saved mean used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_mean", + "option": "optional", + "type": "T" + }, + { + "description": "Saved variance used during training to speed up gradient computation. 
Should not be used for testing.", + "name": "saved_var", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 5", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "BatchNormalization", + "schema": { + "attributes": [ + { + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero, default is 1e-5f.", + "name": "epsilon", + "required": false, + "type": "float32" + }, + { + "description": "If set to nonzero, run spatial batch normalization in test mode, default is 0.", + "name": "is_test", + "required": false, + "type": "int64" + }, + { + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum), default is 0.9f.", + "name": "momentum", + "required": false, + "type": "float32" + }, + { + "default": 1, + "description": "If true, compute the mean and variance across all spatial elements If false, compute the mean and variance across per feature.Default is 1.", + "name": "spatial", + "required": false, + "type": "int64" + } + ], + "category": "Normalization", + "description": "Carries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. 
Depending on the mode it is being run,\nthere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, mean, var, saved_mean, saved_var (training mode)\nOutput case #2: Y (test mode)\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n mean = mean.reshape(-1, *dim_ones)\n var = var.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: (1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\nmean = np.array([0, 3]).astype(np.float32)\nvar = np.array([1, 1.5]).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_example')\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_epsilon')", + "summary": "batchnormalization" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch 
size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + }, + { + "description": "The scale as a 1-dimensional tensor of size C to be applied to the output.", + "name": "scale", + "type": "T" + }, + { + "description": "The bias as a 1-dimensional tensor of size C to be applied to the output.", + "name": "B", + "type": "T" + }, + { + "description": "The running mean (training) or the estimated mean (testing) as a 1-dimensional tensor of size C.", + "name": "mean", + "type": "T" + }, + { + "description": "The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size C.", + "name": "var", + "type": "T" + } + ], + "max_input": 5, + "max_output": 5, + "min_input": 5, + "min_output": 1, + "outputs": [ + { + "description": "The output tensor of the same shape as X.", + "name": "Y", + "type": "T" + }, + { + "description": "The running mean after the BatchNormalization operator. Must be in-place with the input mean. Should not be used for testing.", + "name": "mean", + "option": "optional", + "type": "T" + }, + { + "description": "The running variance after the BatchNormalization operator. Must be in-place with the input var. Should not be used for testing.", + "name": "var", + "option": "optional", + "type": "T" + }, + { + "description": "Saved mean used during training to speed up gradient computation. Should not be used for testing.", + "name": "saved_mean", + "option": "optional", + "type": "T" + }, + { + "description": "Saved variance used during training to speed up gradient computation. 
Should not be used for testing.", + "name": "saved_var", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 5", + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "BatchNormalization", + "schema": { + "attributes": [ + { + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero.", + "name": "epsilon", + "required": false, + "type": "float32" + }, + { + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum).", + "name": "momentum", + "required": false, + "type": "float32" + }, + { + "default": 1, + "description": "If true, compute the mean and variance across per activation. If false, compute the mean and variance across per feature over each mini-batch.", + "name": "spatial", + "required": false, + "type": "int64" + } + ], + "category": "Normalization", + "description": "Carries out batch normalization as described in the paper\n https://arxiv.org/abs/1502.03167. Depending on the mode it is being run,\n there are multiple cases for the number of outputs, which we list below:\n \n Output case #1: Y, mean, var, saved_mean, saved_var (training mode)\n Output case #2: Y (test mode)\n This operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. 
Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n mean = mean.reshape(-1, *dim_ones)\n var = var.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: (1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\nmean = np.array([0, 3]).astype(np.float32)\nvar = np.array([1, 1.5]).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_example')\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_epsilon')", + "summary": "batchnormalization" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. 
For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + }, + { + "description": "If spatial is true, the dimension of scale is (C). If spatial is false, the dimensions of scale are (C x D1 x ... x Dn)", + "name": "scale", + "type": "T" + }, + { + "description": "If spatial is true, the dimension of bias is (C). If spatial is false, the dimensions of bias are (C x D1 x ... x Dn)", + "name": "B", + "type": "T" + }, + { + "description": "If spatial is true, the dimension of the running mean (training) or the estimated mean (testing) is (C). If spatial is false, the dimensions of the running mean (training) or the estimated mean (testing) are (C x D1 x ... x Dn).", + "name": "mean", + "type": "T" + }, + { + "description": "If spatial is true, the dimension of the running variance(training) or the estimated variance (testing) is (C). If spatial is false, the dimensions of the running variance(training) or the estimated variance (testing) are (C x D1 x ... 
x Dn).", + "name": "var", + "type": "T" + } + ], + "max_input": 5, + "max_output": 5, + "min_input": 5, + "min_output": 1, + "outputs": [ + { + "description": "The output tensor of the same shape as X", + "name": "Y", + "type": "T" + }, + { + "description": "The running mean after the BatchNormalization operator.", + "name": "mean", + "option": "optional", + "type": "T" + }, + { + "description": "The running variance after the BatchNormalization operator.", + "name": "var", + "option": "optional", + "type": "T" + }, + { + "description": "Saved mean used during training to speed up gradient computation.", + "name": "saved_mean", + "option": "optional", + "type": "T" + }, + { + "description": "Saved variance used during training to speed up gradient computation.", + "name": "saved_var", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 5", + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "BatchNormalization", + "schema": { + "attributes": [ + { + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero.", + "name": "epsilon", + "required": false, + "type": "float32" + }, + { + "default": 0.8999999761581421, + "description": "Factor used in computing the running mean and variance.e.g., running_mean = running_mean * momentum + mean * (1 - momentum).", + "name": "momentum", + "required": false, + "type": "float32" + } + ], + "category": "Normalization", + "description": "Carries out batch normalization as described in the paper\nhttps://arxiv.org/abs/1502.03167. 
Depending on the mode it is being run,\nthere are multiple cases for the number of outputs, which we list below:\n\nOutput case #1: Y, mean, var, saved_mean, saved_var (training mode)\nOutput case #2: Y (test mode)\n\nFor previous (depreciated) non-spatial cases, implementors are suggested\nto flatten the input shape to (N x C*D1*D2 ..*Dn) before a BatchNormalization Op.\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n mean = mean.reshape(-1, *dim_ones)\n var = var.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: (1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\nmean = np.array([0, 3]).astype(np.float32)\nvar = np.array([1, 1.5]).astype(np.float32)\ny = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_example')\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nmean = np.random.randn(3).astype(np.float32)\nvar = 
np.random.rand(3).astype(np.float32)\nepsilon = 1e-2\ny = _batchnorm_test_mode(x, s, bias, mean, var, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'BatchNormalization',\n inputs=['x', 's', 'bias', 'mean', 'var'],\n outputs=['y'],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias, mean, var], outputs=[y],\n name='test_batchnorm_epsilon')", + "summary": "batchnormalization" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size, C is the number of channels. Statistics are computed for every channel of C over N and D1 to Dn dimensions. For image data, input dimensions become (N x C x H x W). The op also accepts single dimension input of size N in which case C is assumed to be 1", + "name": "X", + "type": "T" + }, + { + "description": "Scale tensor of shape (C).", + "name": "scale", + "type": "T" + }, + { + "description": "Bias tensor of shape (C).", + "name": "B", + "type": "T" + }, + { + "description": "running (training) or estimated (testing) mean tensor of shape (C).", + "name": "mean", + "type": "T" + }, + { + "description": "running (training) or estimated (testing) variance tensor of shape (C).", + "name": "var", + "type": "T" + } + ], + "max_input": 5, + "max_output": 5, + "min_input": 5, + "min_output": 1, + "outputs": [ + { + "description": "The output tensor of the same shape as X", + "name": "Y", + "type": "T" + }, + { + "description": "The running mean after the BatchNormalization operator.", + "name": "mean", + "option": "optional", + "type": "T" + }, + { + "description": "The running variance after the BatchNormalization operator.", + "name": "var", + "option": "optional", + "type": "T" + }, + { + "description": "Saved mean used during training to speed up gradient computation.", + "name": "saved_mean", + "option": "optional", + "type": "T" + }, + { + "description": "Saved variance 
used during training to speed up gradient computation.", + "name": "saved_var", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 5", + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Binarizer", + "schema": { + "attributes": [ + { + "description": "Values greater than this are mapped to 1, others to 0.", + "name": "threshold", + "required": false, + "type": "float32" + } + ], + "description": "Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be binarized", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Binarized output data", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input must be a tensor of a numeric type. The output will be of the same tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "BitShift", + "schema": { + "attributes": [ + { + "description": "Direction of moving bits. It can be either \"RIGHT\" (for right shift) or \"LEFT\" (for left shift).", + "name": "direction", + "required": true, + "type": "string" + } + ], + "description": "Bitwise shift operator performs element-wise operation. For each input element, if the\n attribute \"direction\" is \"RIGHT\", this operator moves its binary representation toward\n the right side so that the input value is effectively decreased. 
If the attribute \"direction\"\n is \"LEFT\", bits of binary representation moves toward the left side, which results the\n increase of its actual value. The input X is the tensor to be shifted and another input\n Y specifies the amounts of shifting. For example, if \"direction\" is \"Right\", X is [1, 4],\n and S is [1, 1], the corresponding output Z would be [0, 2]. If \"direction\" is \"LEFT\" with\n X=[1, 2] and S=[1, 2], the corresponding output Y would be [2, 8].\n \n Because this operator supports Numpy-style broadcasting, X's and Y's shapes are\n not necessarily identical.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n outputs=['z'],\n direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint16)\ny = np.array([1, 2, 3]).astype(np.uint16)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_left_uint16')", + "summary": "left_unit16" + }, + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n outputs=['z'],\n direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint32)\ny = np.array([1, 2, 3]).astype(np.uint32)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_left_uint32')", + "summary": "left_unit32" + }, + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n outputs=['z'],\n direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint64)\ny = np.array([1, 2, 3]).astype(np.uint64)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_left_uint64')", + "summary": "left_unit64" + }, + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n 
outputs=['z'],\n direction=\"LEFT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint8)\ny = np.array([1, 2, 3]).astype(np.uint8)\nz = x << y # expected output [32, 16, 8]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_left_uint8')", + "summary": "left_unit8" + }, + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n outputs=['z'],\n direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint16)\ny = np.array([1, 2, 3]).astype(np.uint16)\nz = x >> y # expected output [8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_right_uint16')", + "summary": "right_unit16" + }, + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n outputs=['z'],\n direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint32)\ny = np.array([1, 2, 3]).astype(np.uint32)\nz = x >> y # expected output [8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_right_uint32')", + "summary": "right_unit32" + }, + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n outputs=['z'],\n direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint64)\ny = np.array([1, 2, 3]).astype(np.uint64)\nz = x >> y # expected output [8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_right_uint64')", + "summary": "right_unit64" + }, + { + "code": "node = onnx.helper.make_node(\n 'BitShift',\n inputs=['x', 'y'],\n outputs=['z'],\n direction=\"RIGHT\"\n)\n\nx = np.array([16, 4, 1]).astype(np.uint8)\ny = np.array([1, 2, 3]).astype(np.uint8)\nz = x >> y # expected output [8, 1, 0]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_bitshift_right_uint8')", + "summary": "right_unit8" + } + ], + "inputs": [ + { + "description": "First operand, input to be shifted.", + "name": "X", + "type": "T" + }, + { + "description": "Second operand, amounts of shift.", + "name": "Y", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + 
"min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Z", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)" + ], + "description": "Constrain input and output types to integer tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Cast", + "schema": { + "attributes": [ + { + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto", + "name": "to", + "required": true, + "type": "string" + } + ], + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\nNOTE: Casting to and from strings is not supported yet.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (3, 4)\ntest_cases = [\n ('FLOAT', 'FLOAT16'),\n ('FLOAT', 'DOUBLE'),\n ('FLOAT16', 'FLOAT'),\n ('FLOAT16', 'DOUBLE'),\n ('DOUBLE', 'FLOAT'),\n ('DOUBLE', 'FLOAT16'),\n ('FLOAT', 'STRING'),\n ('STRING', 'FLOAT'),\n]\n\nfor from_type, to_type in test_cases:\n if 'STRING' != from_type:\n input = np.random.random_sample(shape).astype(\n TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, from_type)])\n if ('STRING' == to_type):\n # Converting input to str, then give it np.object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode('utf-8')\n su = s.decode('utf-8')\n ss.append(su)\n\n output = np.array(ss).astype(np.object).reshape([3, 4])\n else:\n output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])\n else:\n input = np.array([u'0.47892547', u'0.48033667', u'0.49968487', u'0.81910545',\n u'0.47031248', u'0.816468', 
u'0.21087195', u'0.7229038',\n u'NaN', u'INF', u'+INF', u'-INF'], dtype=np.dtype(np.object)).reshape([3, 4])\n output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])\n node = onnx.helper.make_node(\n 'Cast',\n inputs=['input'],\n outputs=['output'],\n to=getattr(TensorProto, to_type),\n )\n expect(node, inputs=[input], outputs=[output],\n name='test_cast_' + from_type + '_to_' + to_type)", + "summary": "cast" + } + ], + "inputs": [ + { + "description": "Input tensor to be cast.", + "name": "input", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with the same shape as input with type specified by the 'to' argument", + "name": "output", + "type": "T2" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ], + "description": "Constrain input types. Casting from strings and complex are not supported.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ], + "description": "Constrain output types. Casting to strings and complex are not supported.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Cast", + "schema": { + "attributes": [ + { + "description": "The data type to which the elements of the input tensor are cast. 
Strictly must be one of the types from DataType enum in TensorProto", + "name": "to", + "required": true, + "type": "int64" + } + ], + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\nNOTE: Casting to and from strings is not supported yet.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (3, 4)\ntest_cases = [\n ('FLOAT', 'FLOAT16'),\n ('FLOAT', 'DOUBLE'),\n ('FLOAT16', 'FLOAT'),\n ('FLOAT16', 'DOUBLE'),\n ('DOUBLE', 'FLOAT'),\n ('DOUBLE', 'FLOAT16'),\n ('FLOAT', 'STRING'),\n ('STRING', 'FLOAT'),\n]\n\nfor from_type, to_type in test_cases:\n if 'STRING' != from_type:\n input = np.random.random_sample(shape).astype(\n TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, from_type)])\n if ('STRING' == to_type):\n # Converting input to str, then give it np.object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode('utf-8')\n su = s.decode('utf-8')\n ss.append(su)\n\n output = np.array(ss).astype(np.object).reshape([3, 4])\n else:\n output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])\n else:\n input = np.array([u'0.47892547', u'0.48033667', u'0.49968487', u'0.81910545',\n u'0.47031248', u'0.816468', u'0.21087195', u'0.7229038',\n u'NaN', u'INF', u'+INF', u'-INF'], dtype=np.dtype(np.object)).reshape([3, 4])\n output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])\n node = onnx.helper.make_node(\n 'Cast',\n inputs=['input'],\n outputs=['output'],\n to=getattr(TensorProto, to_type),\n )\n expect(node, inputs=[input], outputs=[output],\n name='test_cast_' + from_type + '_to_' + to_type)", + "summary": "cast" + } + ], + "inputs": [ + { + "description": "Input tensor to be cast.", + "name": "input", + "type": "T1" + } + ], + 
"max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with the same shape as input with type specified by the 'to' argument", + "name": "output", + "type": "T2" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ], + "description": "Constrain input types. Casting from strings and complex are not supported.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ], + "description": "Constrain output types. Casting to strings and complex are not supported.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Cast", + "schema": { + "attributes": [ + { + "description": "The data type to which the elements of the input tensor are cast. Strictly must be one of the types from DataType enum in TensorProto", + "name": "to", + "required": true, + "type": "int64" + } + ], + "description": "The operator casts the elements of a given input tensor to a data type\nspecified by the 'to' argument and returns an output tensor of the same size in\nthe converted type. The 'to' argument must be one of the data types specified\nin the 'DataType' enum field in the TensorProto message.\n\nCasting from string tensor in plain (e.g., \"3.14\" and \"1000\") and scientific numeric representations\n(e.g., \"1e-5\" and \"1E8\") to float types is supported. For example, converting string \"100.5\" to an integer may\nresult 100. 
There are some string literals reserved for special floating-point values;\n\"+INF\" (and \"INF\"), \"-INF\", and \"NaN\" are positive infinity, negative infinity, and not-a-number, respectively.\nAny string which can exactly match \"+INF\" in a case-insensitive way would be mapped to positive infinite. Similarly,\nthis case-insensitive rule is applied to \"INF\" and \"NaN\". When casting from numeric tensors\nto string tensors, plain floating-point representation (such as \"314.15926\") would be used. \nConverting non-numerical-literal string such as \"Hello World!\" is an undefined behavior. Cases \nof converting string representing floating-point arithmetic value, such as \"2.718\", to INT is an undefined behavior.\n\nConversion from a numerical type to any numerical type is always allowed.\nUser must be aware of precision loss and value change caused by range difference between two types.\nFor example, a 64-bit float 3.1415926459 may be round to a 32-bit float 3.141592. Similarly, converting\nan integer 36 to Boolean may produce 1 because we truncate bits which can't be stored in the targeted type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (3, 4)\ntest_cases = [\n ('FLOAT', 'FLOAT16'),\n ('FLOAT', 'DOUBLE'),\n ('FLOAT16', 'FLOAT'),\n ('FLOAT16', 'DOUBLE'),\n ('DOUBLE', 'FLOAT'),\n ('DOUBLE', 'FLOAT16'),\n ('FLOAT', 'STRING'),\n ('STRING', 'FLOAT'),\n]\n\nfor from_type, to_type in test_cases:\n if 'STRING' != from_type:\n input = np.random.random_sample(shape).astype(\n TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, from_type)])\n if ('STRING' == to_type):\n # Converting input to str, then give it np.object dtype for generating script\n ss = []\n for i in input.flatten():\n s = str(i).encode('utf-8')\n su = s.decode('utf-8')\n ss.append(su)\n\n output = np.array(ss).astype(np.object).reshape([3, 4])\n else:\n output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])\n else:\n input = np.array([u'0.47892547', u'0.48033667', 
u'0.49968487', u'0.81910545',\n u'0.47031248', u'0.816468', u'0.21087195', u'0.7229038',\n u'NaN', u'INF', u'+INF', u'-INF'], dtype=np.dtype(np.object)).reshape([3, 4])\n output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])\n node = onnx.helper.make_node(\n 'Cast',\n inputs=['input'],\n outputs=['output'],\n to=getattr(TensorProto, to_type),\n )\n expect(node, inputs=[input], outputs=[output],\n name='test_cast_' + from_type + '_to_' + to_type)", + "summary": "cast" + } + ], + "inputs": [ + { + "description": "Input tensor to be cast.", + "name": "input", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with the same shape as input with type specified by the 'to' argument", + "name": "output", + "type": "T2" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)" + ], + "description": "Constrain input types. Casting from complex is not supported.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)", + "tensor(string)" + ], + "description": "Constrain output types. 
Casting to complex is not supported.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "CastMap", + "schema": { + "attributes": [ + { + "default": "TO_FLOAT", + "description": "A string indicating the desired element type of the output tensor, one of 'TO_FLOAT', 'TO_STRING', 'TO_INT64'.", + "name": "cast_to", + "required": false, + "type": "string" + }, + { + "default": "DENSE", + "description": "Indicates whether to only output as many values as are in the input (dense), or position the input based on using the key of the map as the index of the output (sparse).
    One of 'DENSE', 'SPARSE'.", + "name": "map_form", + "required": false, + "type": "string" + }, + { + "default": 1, + "description": "If the value of map_form is 'SPARSE,' this attribute indicates the total length of the output tensor.", + "name": "max_map", + "required": false, + "type": "int64" + } + ], + "description": "Converts a map to a tensor.
    The map key must be an int64 and the values will be ordered\n in ascending order based on this key.
    The operator supports dense packing or sparse packing.\n If using sparse packing, the key cannot exceed the max_map-1 value.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "The input map that is to be cast to a tensor", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "A tensor representing the same data as the input map, ordered by their keys", + "name": "Y", + "type": "T2" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "map(int64, string)", + "map(int64, float)" + ], + "description": "The input must be an integer map to either string or float.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(float)", + "tensor(int64)" + ], + "description": "The output is a 1-D tensor of string, float, or integer.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "CategoryMapper", + "schema": { + "attributes": [ + { + "description": "The integers of the map. This sequence must be the same length as the 'cats_strings' sequence.", + "name": "cats_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "The strings of the map. This sequence must be the same length as the 'cats_int64s' sequence", + "name": "cats_strings", + "required": false, + "type": "string[]" + }, + { + "default": -1, + "description": "An integer to use when an input string value is not found in the map.
    One and only one of the 'default_*' attributes must be defined.", + "name": "default_int64", + "required": false, + "type": "int64" + }, + { + "default": "_Unused", + "description": "A string to use when an input integer value is not found in the map.
    One and only one of the 'default_*' attributes must be defined.", + "name": "default_string", + "required": false, + "type": "string" + } + ], + "description": "Converts strings to integers and vice versa.
    \n Two sequences of equal length are used to map between integers and strings,\n with strings and integers at the same index detailing the mapping.
    \n Each operator converts either integers to strings or strings to integers, depending \n on which default value attribute is provided. Only one default value attribute\n should be defined.
    \n If the string default value is set, it will convert integers to strings.\n If the int default value is set, it will convert strings to integers.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Input data", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data. If strings are input, the output values are integers, and vice versa.", + "name": "Y", + "type": "T2" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ], + "description": "The input must be a tensor of strings or integers, either [N,C] or [C].", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ], + "description": "The output is a tensor of strings or integers. Its shape will be the same as the input shape.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Ceil", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Ceil takes one input data (Tensor) and produces one output data\n(Tensor) where the ceil is, y = ceil(x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Ceil',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1.5, 1.2]).astype(np.float32)\ny = np.ceil(x) # expected output [-1., 2.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_ceil_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.ceil(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_ceil')", + "summary": "ceil" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + 
"description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Ceil", + "schema": { + "description": "Ceil takes one input data (Tensor) and produces one output data\n(Tensor) where the ceil is, y = ceil(x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Ceil',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1.5, 1.2]).astype(np.float32)\ny = np.ceil(x) # expected output [-1., 2.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_ceil_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.ceil(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_ceil')", + "summary": "ceil" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Celu", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "The Alpha value in Celu formula which control the shape of the unit. 
The default value is 1.0.", + "name": "alpha", + "required": false, + "type": "float32" + } + ], + "description": "Continuously Differentiable Exponential Linear Units:\nPerform the linear unit element-wise on the input tensor X\nusing formula: \n\n```\nmax(0,x) + min(0,alpha*(exp(x/alpha)-1))\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "alpha = 2.0\nnode = onnx.helper.make_node(\n 'Celu',\n inputs=['X'],\n outputs=['Y'],\n alpha=alpha,\n)\n\ninput_data = np.array([[[[0.8439683], [0.5665144], [0.05836735]],\n [[0.02916367], [0.12964272], [0.5060197]],\n [[0.79538304], [0.9411346], [0.9546573]]],\n [[[0.17730942], [0.46192095], [0.26480448]],\n [[0.6746842], [0.01665257], [0.62473077]],\n [[0.9240844], [0.9722341], [0.11965699]]],\n [[[0.41356155], [0.9129373], [0.59330076]],\n [[0.81929934], [0.7862604], [0.11799799]],\n [[0.69248444], [0.54119414], [0.07513223]]]], dtype=np.float32)\n\n# Calculate expected output data\npositive_input = np.maximum(0, input_data)\nnegative_input = np.minimum(0, alpha * (np.exp(input_data / alpha) - 1))\nexpected_output = positive_input + negative_input\n\nexpect(node, inputs=[input_data], outputs=[expected_output],\n name='test_celu')", + "summary": "celu" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)" + ], + "description": "Constrain input and output types to float32 tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Clip", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + }, + { + "description": "Maximum value, above which element is replaced by max", + 
"name": "max", + "required": false, + "type": "float32" + }, + { + "description": "Minimum value, under which element is replaced by min", + "name": "min", + "required": false, + "type": "float32" + } + ], + "description": "Clip operator limits the given input within an interval. The interval is\nspecified with arguments 'min' and 'max'. They default to\nnumeric_limits::lowest() and numeric_limits::max() respectively.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip')\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_inbounds')\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_outbounds')\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_splitbounds')", + "summary": "clip" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_min')\n\nno_min = \"\" # 
optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_max')\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_clip_default_inbounds')", + "summary": "clip_default" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_int8_min')\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_int8_max')\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y],\n name='test_clip_default_int8_inbounds')", + "summary": "clip_default_int8" + } + ], + "inputs": [ + { + "description": "Input tensor whose elements to be clipped", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with clipped input elements", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + 
"support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Clip", + "schema": { + "attributes": [ + { + "default": 3.4028234663852886e+38, + "description": "Maximum value, above which element is replaced by max", + "name": "max", + "required": false, + "type": "float32" + }, + { + "default": -3.4028234663852886e+38, + "description": "Minimum value, under which element is replaced by min", + "name": "min", + "required": false, + "type": "float32" + } + ], + "description": "Clip operator limits the given input within an interval. The interval is\nspecified with arguments 'min' and 'max'. They default to\nnumeric_limits::lowest() and numeric_limits::max() respectively.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip')\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_inbounds')\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_outbounds')\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = 
np.array([-1, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_splitbounds')", + "summary": "clip" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_min')\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_max')\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_clip_default_inbounds')", + "summary": "clip_default" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_int8_min')\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_int8_max')\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, 
inputs=[x], outputs=[y],\n name='test_clip_default_int8_inbounds')", + "summary": "clip_default_int8" + } + ], + "inputs": [ + { + "description": "Input tensor whose elements to be clipped", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with clipped input elements", + "name": "output", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Clip", + "schema": { + "description": "Clip operator limits the given input within an interval. The interval is\nspecified by the inputs 'min' and 'max'. They default to\nnumeric_limits::lowest() and numeric_limits::max(), respectively.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip')\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_inbounds')\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n 
name='test_clip_outbounds')\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_splitbounds')", + "summary": "clip" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_min')\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_max')\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_clip_default_inbounds')", + "summary": "clip_default" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_int8_min')\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_int8_max')\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 
1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y],\n name='test_clip_default_int8_inbounds')", + "summary": "clip_default_int8" + } + ], + "inputs": [ + { + "description": "Input tensor whose elements to be clipped", + "name": "input", + "type": "T" + }, + { + "description": "Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).", + "name": "min", + "option": "optional", + "type": "T" + }, + { + "description": "Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape).", + "name": "max", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "1 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with clipped input elements", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Clip", + "schema": { + "description": "Clip operator limits the given input within an interval. The interval is\nspecified by the inputs 'min' and 'max'. 
They default to\nnumeric_limits::lowest() and numeric_limits::max(), respectively.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nx = np.array([-2, 0, 2]).astype(np.float32)\nmin_val = np.float32(-1)\nmax_val = np.float32(1)\ny = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, max_val)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip')\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min', 'max'],\n outputs=['y'],\n)\n\nmin_val = np.float32(-5)\nmax_val = np.float32(5)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_inbounds')\n\nx = np.array([-6, 0, 6]).astype(np.float32)\ny = np.array([-5, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_outbounds')\n\nx = np.array([-1, 0, 6]).astype(np.float32)\ny = np.array([-1, 0, 5]).astype(np.float32)\nexpect(node, inputs=[x, min_val, max_val], outputs=[y],\n name='test_clip_splitbounds')", + "summary": "clip" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, min_val, np.inf)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_min')\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.float32(0)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, -np.inf, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_max')\n\nno_max = \"\" # optional input, 
not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-1, 0, 1]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_clip_default_inbounds')", + "summary": "clip_default" + }, + { + "code": "node = onnx.helper.make_node(\n 'Clip',\n inputs=['x', 'min'],\n outputs=['y'],\n)\nmin_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, min_val, np.iinfo(np.int8).max)\nexpect(node, inputs=[x, min_val], outputs=[y],\n name='test_clip_default_int8_min')\n\nno_min = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, 'max'],\n outputs=['y'],\n)\nmax_val = np.int8(0)\nx = np.random.randn(3, 4, 5).astype(np.int8)\ny = np.clip(x, np.iinfo(np.int8).min, max_val)\nexpect(node, inputs=[x, max_val], outputs=[y],\n name='test_clip_default_int8_max')\n\nno_max = \"\" # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Clip',\n inputs=['x', no_min, no_max],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.int8)\ny = np.array([-1, 0, 1]).astype(np.int8)\nexpect(node, inputs=[x], outputs=[y],\n name='test_clip_default_int8_inbounds')", + "summary": "clip_default_int8" + } + ], + "inputs": [ + { + "description": "Input tensor whose elements to be clipped", + "name": "input", + "type": "T" + }, + { + "description": "Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape).", + "name": "min", + "option": "optional", + "type": "T" + }, + { + "description": "Maximum value, above which element is replaced by max. 
It must be a scalar(tensor of empty shape).", + "name": "max", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "1 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with clipped input elements", + "name": "output", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Compress", + "schema": { + "attributes": [ + { + "description": "(Optional) Axis along which to take slices. If not specified, input is flattened before elements being selected.", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index.\n In case axis is not provided, input is flattened before elements are selected.\n Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n axis=0,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 1])\noutput = np.compress(condition, input, axis=0)\n#print(output)\n#[[ 3. 4.]\n# [ 5. 
6.]]\n\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_0')", + "summary": "compress_0" + }, + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n axis=1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=1)\n#print(output)\n#[[ 2.]\n# [ 4.]\n# [ 6.]]\n\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_1')", + "summary": "compress_1" + }, + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 0, 0, 1])\noutput = np.compress(condition, input)\n#print(output)\n#[ 2., 5.]\n\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_default_axis')", + "summary": "compress_default_axis" + }, + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n axis=-1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=-1)\n# print(output)\n#[[ 2.]\n# [ 4.]\n# [ 6.]]\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_negative_axis')", + "summary": "compress_negative_axis" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "input", + "type": "T" + }, + { + "description": "Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length alone the axis or the flattened input size if axis is not specified. 
In such cases data slices or elements exceeding the condition length are discarded.", + "name": "condition", + "type": "T1" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains to boolean tensors.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Compress", + "schema": { + "attributes": [ + { + "description": "(Optional) Axis along which to take slices. If not specified, input is flattened before elements being selected. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(input).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index.\n In case axis is not provided, input is flattened before elements are selected.\n Compress behaves like numpy.compress: https://docs.scipy.org/doc/numpy/reference/generated/numpy.compress.html\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n axis=0,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 1])\noutput = np.compress(condition, input, axis=0)\n#print(output)\n#[[ 3. 4.]\n# [ 5. 6.]]\n\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_0')", + "summary": "compress_0" + }, + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n axis=1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=1)\n#print(output)\n#[[ 2.]\n# [ 4.]\n# [ 6.]]\n\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_1')", + "summary": "compress_1" + }, + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n)\ninput = np.array([[1, 2], [3, 4], [5, 6]]).astype(np.float32)\ncondition = np.array([0, 1, 0, 0, 1])\noutput = np.compress(condition, input)\n#print(output)\n#[ 2., 5.]\n\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_default_axis')", + "summary": "compress_default_axis" + }, + { + "code": "node = onnx.helper.make_node(\n 'Compress',\n inputs=['input', 'condition'],\n outputs=['output'],\n axis=-1,\n)\ninput = np.array([[1, 2], [3, 4], [5, 
6]]).astype(np.float32)\ncondition = np.array([0, 1])\noutput = np.compress(condition, input, axis=-1)\n# print(output)\n#[[ 2.]\n# [ 4.]\n# [ 6.]]\nexpect(node, inputs=[input, condition.astype(np.bool)], outputs=[output],\n name='test_compress_negative_axis')", + "summary": "compress_negative_axis" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "input", + "type": "T" + }, + { + "description": "Rank 1 tensor of booleans to indicate which slices or data elements to be selected. Its length can be less than the input length along the axis or the flattened input size if axis is not specified. In such cases data slices or elements exceeding the condition length are discarded.", + "name": "condition", + "type": "T1" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank r if axis is specified. Otherwise output is a Tensor of rank 1.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains to boolean tensors.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Concat", + "schema": { + "attributes": [ + { + "description": "Which axis to concat on. 
Default value is 1.", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Tensor", + "description": "Concatenate a list of tensors into a single tensor", + "domain": "ai.onnx", + "examples": [ + { + "code": "test_cases = {\n '1d': ([1, 2],\n [3, 4]),\n '2d': ([[1, 2], [3, 4]],\n [[5, 6], [7, 8]]),\n '3d': ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[[9, 10], [11, 12]], [[13, 14], [15, 16]]])\n} # type: Dict[Text, Sequence[Any]]\n\nfor test_case, values_ in test_cases.items():\n values = [np.asarray(v, dtype=np.float32) for v in values_]\n for i in range(len(values[0].shape)):\n in_args = ['value' + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n 'Concat',\n inputs=[s for s in in_args],\n outputs=['output'],\n axis=i\n )\n output = np.concatenate(values, i)\n expect(node, inputs=[v for v in values], outputs=[output],\n name='test_concat_' + test_case + '_axis_' + str(i))\n\n for i in range(-len(values[0].shape), 0):\n in_args = ['value' + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n 'Concat',\n inputs=[s for s in in_args],\n outputs=['output'],\n axis=i\n )\n output = np.concatenate(values, i)\n expect(node, inputs=[v for v in values], outputs=[output],\n name='test_concat_' + test_case + '_axis_negative_' + str(abs(i)))", + "summary": "concat" + } + ], + "inputs": [ + { + "description": "List of tensors for concatenation", + "name": "inputs", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Concatenated tensor", + "name": "concat_result", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Concat", 
+ "schema": { + "attributes": [ + { + "description": "Which axis to concat on", + "name": "axis", + "required": true, + "type": "int64" + } + ], + "category": "Tensor", + "description": "Concatenate a list of tensors into a single tensor", + "domain": "ai.onnx", + "examples": [ + { + "code": "test_cases = {\n '1d': ([1, 2],\n [3, 4]),\n '2d': ([[1, 2], [3, 4]],\n [[5, 6], [7, 8]]),\n '3d': ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[[9, 10], [11, 12]], [[13, 14], [15, 16]]])\n} # type: Dict[Text, Sequence[Any]]\n\nfor test_case, values_ in test_cases.items():\n values = [np.asarray(v, dtype=np.float32) for v in values_]\n for i in range(len(values[0].shape)):\n in_args = ['value' + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n 'Concat',\n inputs=[s for s in in_args],\n outputs=['output'],\n axis=i\n )\n output = np.concatenate(values, i)\n expect(node, inputs=[v for v in values], outputs=[output],\n name='test_concat_' + test_case + '_axis_' + str(i))\n\n for i in range(-len(values[0].shape), 0):\n in_args = ['value' + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n 'Concat',\n inputs=[s for s in in_args],\n outputs=['output'],\n axis=i\n )\n output = np.concatenate(values, i)\n expect(node, inputs=[v for v in values], outputs=[output],\n name='test_concat_' + test_case + '_axis_negative_' + str(abs(i)))", + "summary": "concat" + } + ], + "inputs": [ + { + "description": "List of tensors for concatenation", + "name": "inputs", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Concatenated tensor", + "name": "concat_result", + "type": "T" + } + ], + "since_version": 4, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + 
"tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain output types to any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Concat", + "schema": { + "attributes": [ + { + "description": "Which axis to concat on. A negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(inputs)..", + "name": "axis", + "required": true, + "type": "int64" + } + ], + "category": "Tensor", + "description": "Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on.", + "domain": "ai.onnx", + "examples": [ + { + "code": "test_cases = {\n '1d': ([1, 2],\n [3, 4]),\n '2d': ([[1, 2], [3, 4]],\n [[5, 6], [7, 8]]),\n '3d': ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],\n [[[9, 10], [11, 12]], [[13, 14], [15, 16]]])\n} # type: Dict[Text, Sequence[Any]]\n\nfor test_case, values_ in test_cases.items():\n values = [np.asarray(v, dtype=np.float32) for v in values_]\n for i in range(len(values[0].shape)):\n in_args = ['value' + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n 'Concat',\n inputs=[s for s in in_args],\n outputs=['output'],\n axis=i\n )\n output = np.concatenate(values, i)\n expect(node, inputs=[v for v in values], outputs=[output],\n name='test_concat_' + test_case + '_axis_' + str(i))\n\n for i in range(-len(values[0].shape), 0):\n in_args = ['value' + str(k) for k in range(len(values))]\n node = onnx.helper.make_node(\n 'Concat',\n inputs=[s for s in in_args],\n outputs=['output'],\n axis=i\n )\n output = np.concatenate(values, i)\n expect(node, inputs=[v for v in values], outputs=[output],\n name='test_concat_' + test_case + '_axis_negative_' + str(abs(i)))", + "summary": "concat" + } + ], + "inputs": [ + { + "description": "List of tensors for 
concatenation", + "name": "inputs", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Concatenated tensor", + "name": "concat_result", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain output types to any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ConcatFromSequence", + "schema": { + "attributes": [ + { + "description": "Which axis to concat on. Accepted range in `[-r, r - 1]`, where `r` is the rank of input tensors. When `new_axis` is 1, accepted range is `[-r - 1, r]`. 
", + "name": "axis", + "required": true, + "type": "int64" + }, + { + "description": "Insert and concatenate on a new axis or not, default 0 means do not insert new axis.", + "name": "new_axis", + "required": false, + "type": "int64" + } + ], + "description": "Concatenate a sequence of tensors into a single tensor.\nAll input tensors must have the same shape, except for the dimension size of the axis to concatenate on.\nBy default 'new_axis' is 0, the behavior is similar to numpy.concatenate.\nWhen 'new_axis' is 1, the behavior is similar to numpy.stack.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Sequence of tensors for concatenation", + "name": "input_sequence", + "type": "S" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Concatenated tensor", + "name": "concat_result", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain input types to any tensor type.", + "type_param_str": "S" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain output types to any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Constant", + "schema": { + "attributes": [ + { + "description": "The value 
for the elements of the output tensor.", + "name": "value", + "required": true, + "type": "tensor" + } + ], + "category": "Constant", + "description": "A constant tensor.", + "domain": "ai.onnx", + "examples": [ + { + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['values'],\n value=onnx.helper.make_tensor(\n name='const_tensor',\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values],\n name='test_constant')", + "summary": "constant" + } + ], + "max_input": 0, + "max_output": 1, + "min_input": 0, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor containing the same value of the provided tensor.", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Constant", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor.", + "name": "value", + "required": true, + "type": "tensor" + } + ], + "category": "Constant", + "description": "A constant tensor.", + "domain": "ai.onnx", + "examples": [ + { + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['values'],\n value=onnx.helper.make_tensor(\n name='const_tensor',\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values],\n name='test_constant')", + "summary": "constant" + } + ], + "max_input": 0, + "max_output": 1, + "min_input": 0, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor containing the same value of the provided 
tensor.", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Constant", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor in sparse format.", + "name": "sparse_value", + "required": false + }, + { + "description": "The value for the elements of the output tensor.", + "name": "value", + "required": false, + "type": "tensor" + } + ], + "category": "Constant", + "description": "A constant tensor. Exactly one of the two attributes, either value or sparse_value,\nmust be specified.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['values'],\n value=onnx.helper.make_tensor(\n name='const_tensor',\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values],\n name='test_constant')", + "summary": "constant" + } + ], + "max_input": 0, + "max_output": 1, + "min_input": 0, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor containing the same value of the provided tensor.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + 
"tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Constant", + "schema": { + "attributes": [ + { + "description": "The value for the elements of the output tensor in sparse format.", + "name": "sparse_value", + "required": false + }, + { + "description": "The value for the elements of the output tensor.", + "name": "value", + "required": false, + "type": "tensor" + }, + { + "description": "The value for the sole element for the scalar, float32, output tensor.", + "name": "value_float", + "required": false, + "type": "float32" + }, + { + "description": "The values for the elements for the 1D, float32, output tensor.", + "name": "value_floats", + "required": false, + "type": "float32[]" + }, + { + "description": "The value for the sole element for the scalar, int64, output tensor.", + "name": "value_int", + "required": false, + "type": "int64" + }, + { + "description": "The values for the elements for the 1D, int64, output tensor.", + "name": "value_ints", + "required": false, + "type": "int64[]" + }, + { + "description": "The value for the sole element for the scalar, UTF-8 string, output tensor.", + "name": "value_string", + "required": false, + "type": "string" + }, + { + "description": "The values for the elements for the 1D, UTF-8 string, output tensor.", + "name": "value_strings", + "required": false, + "type": "string[]" + } + ], + "category": "Constant", + "description": "This operator produces a constant tensor. 
Exactly one of the provided attributes, either value, sparse_value,\nor value_* must be specified.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "values = np.random.randn(5, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['values'],\n value=onnx.helper.make_tensor(\n name='const_tensor',\n data_type=onnx.TensorProto.FLOAT,\n dims=values.shape,\n vals=values.flatten().astype(float),\n ),\n)\n\nexpect(node, inputs=[], outputs=[values],\n name='test_constant')", + "summary": "constant" + } + ], + "max_input": 0, + "max_output": 1, + "min_input": 0, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor containing the same value of the provided tensor.", + "name": "output", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ConstantOfShape", + "schema": { + "attributes": [ + { + "description": "(Optional) The value of the output elements.Should be a one-element tensor. 
If not specified, it defaults to a tensor of value 0 and datatype float32", + "name": "value", + "required": false, + "type": "tensor" + } + ], + "description": "Generate a tensor with given value and shape.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "x = np.array([4, 3, 2]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\"value\", onnx.TensorProto.FLOAT,\n [1], [1])\nnode = onnx.helper.make_node(\n 'ConstantOfShape',\n inputs=['x'],\n outputs=['y'],\n value=tensor_value,\n)\n\ny = np.ones(x, dtype=np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_constantofshape_float_ones')", + "summary": "float_ones" + }, + { + "code": "x = np.array([0, ]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\"value\", onnx.TensorProto.INT32,\n [1], [0])\nnode = onnx.helper.make_node(\n 'ConstantOfShape',\n inputs=['x'],\n outputs=['y'],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_constantofshape_int_shape_zero')", + "summary": "int32_shape_zero" + }, + { + "code": "x = np.array([10, 6]).astype(np.int64)\ntensor_value = onnx.helper.make_tensor(\"value\", onnx.TensorProto.INT32,\n [1], [0])\nnode = onnx.helper.make_node(\n 'ConstantOfShape',\n inputs=['x'],\n outputs=['y'],\n value=tensor_value,\n)\ny = np.zeros(x, dtype=np.int32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_constantofshape_int_zeros')", + "summary": "int32_zeros" + } + ], + "inputs": [ + { + "description": "1D tensor. The shape of the expected output tensor. If empty tensor is given, the output would be a scalar. 
All values must be >= 0.", + "name": "input", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of shape specified by 'input'.If attribute 'value' is specified, the value and datatype of the output tensor is taken from 'value'.If attribute 'value' is not specified, the value in the output defaults to 0, and the datatype defaults to float32.", + "name": "output", + "type": "T2" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain input types.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ], + "description": "Constrain output types to be numerics.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Conv", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "dilation value along each spatial axis of the filter.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "number of groups input channels and output channels are divided into.", + "name": "group", + "required": false, + "type": "int64" + }, + { + "description": "The shape of the convolution kernel. 
If not present, should be inferred from input W.", + "name": "kernel_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Layer", + "description": "The convolution operator consumes an input tensor and a filter, and\ncomputes the output.", + "domain": "ai.onnx", + "examples": [ + { + "code": "\nx = np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 5, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.]]]]).astype(np.float32)\nW = np.array([[[[1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\n# Convolution with padding\nnode_with_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[1, 1, 1, 1],\n)\ny_with_padding = np.array([[[[12., 21., 27., 33., 24.], # (1, 1, 5, 5) output tensor\n [33., 54., 63., 72., 51.],\n [63., 99., 108., 117., 81.],\n [93., 144., 153., 162., 111.],\n [72., 111., 117., 123., 84.]]]]).astype(np.float32)\nexpect(node_with_padding, inputs=[x, W], outputs=[y_with_padding],\n 
name='test_basic_conv_with_padding')\n\n# Convolution without padding\nnode_without_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[0, 0, 0, 0],\n)\ny_without_padding = np.array([[[[54., 63., 72.], # (1, 1, 3, 3) output tensor\n [99., 108., 117.],\n [144., 153., 162.]]]]).astype(np.float32)\nexpect(node_without_padding, inputs=[x, W], outputs=[y_without_padding],\n name='test_basic_conv_without_padding')", + "summary": "conv" + }, + { + "code": "\nx = np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.]]]]).astype(np.float32)\nW = np.array([[[[1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\n# Convolution with strides=2 and padding\nnode_with_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[1, 1, 1, 1],\n strides=[2, 2], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_padding = np.array([[[[12., 27., 24.], # (1, 1, 4, 3) output tensor\n [63., 108., 81.],\n [123., 198., 141.],\n [112., 177., 124.]]]]).astype(np.float32)\nexpect(node_with_padding, inputs=[x, W], outputs=[y_with_padding],\n name='test_conv_with_strides_padding')\n\n# Convolution with strides=2 and no padding\nnode_without_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[0, 0, 0, 0],\n strides=[2, 2], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_without_padding = np.array([[[[54., 72.], # (1, 1, 3, 2) output tensor\n [144., 162.],\n [234., 252.]]]]).astype(np.float32)\nexpect(node_without_padding, inputs=[x, W], 
outputs=[y_without_padding],\n name='test_conv_with_strides_no_padding')\n\n# Convolution with strides=2 and padding only along one dimension (the H dimension in NxCxHxW tensor)\nnode_with_asymmetric_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[1, 0, 1, 0],\n strides=[2, 2], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_asymmetric_padding = np.array([[[[21., 33.], # (1, 1, 4, 2) output tensor\n [99., 117.],\n [189., 207.],\n [171., 183.]]]]).astype(np.float32)\nexpect(node_with_asymmetric_padding, inputs=[x, W], outputs=[y_with_asymmetric_padding],\n name='test_conv_with_strides_and_asymmetric_padding')", + "summary": "conv_with_strides" + } + ], + "inputs": [ + { + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). 
Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL. ", + "name": "W", + "type": "T" + }, + { + "description": "Optional 1D bias to be added to the convolution, has size of M.", + "name": "B", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Conv", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults is 1 along each spatial axis.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "number of groups input channels and output channels are divided into.", + "name": "group", + "required": false, + "type": "int64" + }, + { + "description": "The shape of the convolution kernel. 
If not present, should be inferred from input W.", + "name": "kernel_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis. If not present, the stride defaults is 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Layer", + "description": "The convolution operator consumes an input tensor and a filter, and\ncomputes the output.", + "domain": "ai.onnx", + "examples": [ + { + "code": "\nx = np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 5, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.]]]]).astype(np.float32)\nW = np.array([[[[1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\n# Convolution with padding\nnode_with_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[1, 1, 1, 1],\n)\ny_with_padding = np.array([[[[12., 21., 27., 33., 24.], # (1, 1, 5, 5) output tensor\n [33., 54., 63., 72., 51.],\n [63., 99., 108., 117., 81.],\n [93., 144., 153., 162., 111.],\n [72., 111., 117., 123., 
84.]]]]).astype(np.float32)\nexpect(node_with_padding, inputs=[x, W], outputs=[y_with_padding],\n name='test_basic_conv_with_padding')\n\n# Convolution without padding\nnode_without_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1\n pads=[0, 0, 0, 0],\n)\ny_without_padding = np.array([[[[54., 63., 72.], # (1, 1, 3, 3) output tensor\n [99., 108., 117.],\n [144., 153., 162.]]]]).astype(np.float32)\nexpect(node_without_padding, inputs=[x, W], outputs=[y_without_padding],\n name='test_basic_conv_without_padding')", + "summary": "conv" + }, + { + "code": "\nx = np.array([[[[0., 1., 2., 3., 4.], # (1, 1, 7, 5) input tensor\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.],\n [20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.]]]]).astype(np.float32)\nW = np.array([[[[1., 1., 1.], # (1, 1, 3, 3) tensor for convolution weights\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\n# Convolution with strides=2 and padding\nnode_with_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[1, 1, 1, 1],\n strides=[2, 2], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_padding = np.array([[[[12., 27., 24.], # (1, 1, 4, 3) output tensor\n [63., 108., 81.],\n [123., 198., 141.],\n [112., 177., 124.]]]]).astype(np.float32)\nexpect(node_with_padding, inputs=[x, W], outputs=[y_with_padding],\n name='test_conv_with_strides_padding')\n\n# Convolution with strides=2 and no padding\nnode_without_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[0, 0, 0, 0],\n strides=[2, 2], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_without_padding = np.array([[[[54., 72.], # (1, 1, 3, 2) output tensor\n [144., 162.],\n [234., 
252.]]]]).astype(np.float32)\nexpect(node_without_padding, inputs=[x, W], outputs=[y_without_padding],\n name='test_conv_with_strides_no_padding')\n\n# Convolution with strides=2 and padding only along one dimension (the H dimension in NxCxHxW tensor)\nnode_with_asymmetric_padding = onnx.helper.make_node(\n 'Conv',\n inputs=['x', 'W'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[1, 0, 1, 0],\n strides=[2, 2], # Default values for other attributes: dilations=[1, 1], groups=1\n)\ny_with_asymmetric_padding = np.array([[[[21., 33.], # (1, 1, 4, 2) output tensor\n [99., 117.],\n [189., 207.],\n [171., 183.]]]]).astype(np.float32)\nexpect(node_with_asymmetric_padding, inputs=[x, W], outputs=[y_with_asymmetric_padding],\n name='test_conv_with_strides_and_asymmetric_padding')", + "summary": "conv_with_strides" + } + ], + "inputs": [ + { + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. 
X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL. ", + "name": "W", + "type": "T" + }, + { + "description": "Optional 1D bias to be added to the convolution, has size of M.", + "name": "B", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "Y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ConvInteger", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each axis.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "number of groups input channels and output channels are divided into. default is 1.", + "name": "group", + "required": false, + "type": "int64" + }, + { + "description": "The shape of the convolution kernel. 
If not present, should be inferred from input 'w'.", + "name": "kernel_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0.The value represent the number of pixels added to the beginning and end part of the corresponding axis.`pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number ofpixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`.This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaultsto 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "description": "The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point,\nand computes the output. The production MUST never overflow. 
The accumulation may overflow if and only if in 32 bits.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "\nx = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.uint8).reshape((1, 1, 3, 3))\nx_zero_point = np.uint8(1)\nw = np.array([1, 1, 1, 1]).astype(np.uint8).reshape((1, 1, 2, 2))\n\ny = np.array([12, 16, 24, 28]).astype(np.int32).reshape(1, 1, 2, 2)\n\n# ConvInteger without padding\nconvinteger_node = onnx.helper.make_node('ConvInteger',\n inputs=['x', 'w', 'x_zero_point'],\n outputs=['y'])\n\nexpect(convinteger_node, inputs=[x, w, x_zero_point], outputs=[y],\n name='test_basic_convinteger')\n\n# ConvInteger with padding\ny_with_padding = np.array([1, 3, 5, 3, 5, 12, 16, 9, 11, 24, 28, 15, 7, 15, 17, 9]).astype(np.int32).reshape((1, 1, 4, 4))\n\nconvinteger_node_with_padding = onnx.helper.make_node('ConvInteger',\n inputs=['x', 'w', 'x_zero_point'],\n outputs=['y'],\n pads=[1, 1, 1, 1],)\n\nexpect(convinteger_node_with_padding, inputs=[x, w, x_zero_point], outputs=[y_with_padding],\n name='test_convinteger_with_padding')", + "summary": "convinteger" + } + ], + "inputs": [ + { + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "x", + "type": "T1" + }, + { + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. 
Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL. ", + "name": "w", + "type": "T2" + }, + { + "description": "Zero point tensor for input 'x'. It's optional and default value is 0. It's a scalar, which means a per-tensor/layer quantization.", + "name": "x_zero_point", + "option": "optional", + "type": "T1" + }, + { + "description": "Zero point tensor for input 'w'. It's optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it's a 1-D tensor, its number of elements should be equal to the number of output channels (M)", + "name": "w_zero_point", + "option": "optional", + "type": "T2" + } + ], + "inputs_range": "2 - 4", + "max_input": 4, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the convolution. 
The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "y", + "type": "T3" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain input x and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain input w and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain output y data type to 32-bit integer tensor.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "ConvTranspose", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "dilation value along each spatial axis of the filter.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "number of groups input channels and output channels are divided into.", + "name": "group", + "required": false, + "type": "int64" + }, + { + "description": "The shape of the convolution kernel. If not present, should be inferred from input W.", + "name": "kernel_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "The zero-padding added to one side of the output. 
This is also called adjs/adjustment in some frameworks.", + "name": "output_padding", + "required": false, + "type": "int64[]" + }, + { + "description": "The shape of the output can be explicitly set which will cause pads values to be auto generated. If output_shape is specified pads values are ignored. See doc for details for equations to generate pads", + "name": "output_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Layer", + "description": "The convolution transpose operator consumes an input tensor and a filter,\nand computes the output.\n\nIf the pads parameter is provided the shape of the output is calculated via the following equation:\n\n output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]\n\noutput_shape can also be explicitly specified in which case pads values are auto generated using these equations:\n\n total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]\n If (auto_pads != SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)\n 
Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2).\n\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)\n [3., 4., 5.],\n [6., 7., 8.]]]]).astype(np.float32)\n\nW = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array([[[[0., 1., 3., 3., 2.], # (1, 2, 5, 5)\n [3., 8., 15., 12., 7.],\n [9., 21., 36., 27., 15.],\n [9., 20., 33., 24., 13.],\n [6., 13., 21., 15., 8.]],\n\n [[0., 1., 3., 3., 2.],\n [3., 8., 15., 12., 7.],\n [9., 21., 36., 27., 15.],\n [9., 20., 33., 24., 13.],\n [6., 13., 21., 15., 8.]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose')", + "summary": "convtranspose" + }, + { + "code": "x = np.array([[[0., 1., 2.]]]).astype(np.float32) # (1, 1, 3)\n\nW = np.array([[[1., 1., 1.], # (1, 2, 3)\n [1., 1., 1.]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array([[[0., 1., 3., 3., 2.], # (1, 2, 5)\n [0., 1., 3., 3., 2.]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_1d')", + "summary": "convtranspose_1d" + }, + { + "code": "x = np.array([[[[[0., 1., 2., 3., 4.], # (1, 1, 3, 4, 5)\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.]],\n [[20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.],\n [35., 36., 37., 38., 39.]],\n [[40., 41., 42., 43., 44.],\n [45., 46., 47., 48., 49.],\n [50., 51., 52., 53., 54.],\n [55., 56., 57., 58., 59.]]]]]).astype(np.float32)\n\nW = np.array([[[[[1., 1., 1.], # (1, 2, 3, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]],\n [[[1., 1., 1.],\n [1., 1., 1.],\n 
[1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array([[[[[0., 1., 3., 6., 9., 7., 4.], # (1, 2, 5, 6, 7)\n [5., 12., 21., 27., 33., 24., 13.],\n [15., 33., 54., 63., 72., 51., 27.],\n [30., 63., 99., 108., 117., 81., 42.],\n [25., 52., 81., 87., 93., 64., 33.],\n [15., 31., 48., 51., 54., 37., 19.]],\n\n [[20., 42., 66., 72., 78., 54., 28.],\n [50., 104., 162., 174., 186., 128., 66.],\n [90., 186., 288., 306., 324., 222., 114.],\n [120., 246., 378., 396., 414., 282., 144.],\n [90., 184., 282., 294., 306., 208., 106.],\n [50., 102., 156., 162., 168., 114., 58.]],\n\n [[60., 123., 189., 198., 207., 141., 72.],\n [135., 276., 423., 441., 459., 312., 159.],\n [225., 459., 702., 729., 756., 513., 261.],\n [270., 549., 837., 864., 891., 603., 306.],\n [195., 396., 603., 621., 639., 432., 219.],\n [105., 213., 324., 333., 342., 231., 117.]],\n\n [[60., 122., 186., 192., 198., 134., 68.],\n [130., 264., 402., 414., 426., 288., 146.],\n [210., 426., 648., 666., 684., 462., 234.],\n [240., 486., 738., 756., 774., 522., 264.],\n [170., 344., 522., 534., 546., 368., 186.],\n [90., 182., 276., 282., 288., 194., 98.]],\n\n [[40., 81., 123., 126., 129., 87., 44.],\n [85., 172., 261., 267., 273., 184., 93.],\n [135., 273., 414., 423., 432., 291., 147.],\n [150., 303., 459., 468., 477., 321., 162.],\n [105., 212., 321., 327., 333., 224., 113.],\n [55., 111., 168., 171., 174., 117., 59.]]],\n\n [[[0., 1., 3., 6., 9., 7., 4.],\n [5., 12., 21., 27., 33., 24., 13.],\n [15., 33., 54., 63., 72., 51., 27.],\n [30., 63., 99., 108., 117., 81., 42.],\n [25., 52., 81., 87., 93., 64., 33.],\n [15., 31., 48., 51., 54., 37., 19.]],\n\n [[20., 42., 66., 72., 78., 54., 28.],\n [50., 104., 162., 174., 186., 128., 66.],\n [90., 186., 288., 306., 324., 222., 114.],\n [120., 246., 378., 396., 414., 282., 144.],\n [90., 184., 
282., 294., 306., 208., 106.],\n [50., 102., 156., 162., 168., 114., 58.]],\n\n [[60., 123., 189., 198., 207., 141., 72.],\n [135., 276., 423., 441., 459., 312., 159.],\n [225., 459., 702., 729., 756., 513., 261.],\n [270., 549., 837., 864., 891., 603., 306.],\n [195., 396., 603., 621., 639., 432., 219.],\n [105., 213., 324., 333., 342., 231., 117.]],\n\n [[60., 122., 186., 192., 198., 134., 68.],\n [130., 264., 402., 414., 426., 288., 146.],\n [210., 426., 648., 666., 684., 462., 234.],\n [240., 486., 738., 756., 774., 522., 264.],\n [170., 344., 522., 534., 546., 368., 186.],\n [90., 182., 276., 282., 288., 194., 98.]],\n\n [[40., 81., 123., 126., 129., 87., 44.],\n [85., 172., 261., 267., 273., 184., 93.],\n [135., 273., 414., 423., 432., 291., 147.],\n [150., 303., 459., 468., 477., 321., 162.],\n [105., 212., 321., 327., 333., 224., 113.],\n [55., 111., 168., 171., 174., 117., 59.]]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_3d')", + "summary": "convtranspose_3d" + }, + { + "code": "x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)\n [3., 4., 5.],\n [6., 7., 8.]]]]).astype(np.float32)\n\nW = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\ny = np.array([[[[0., 0., 1., 1., 3., 2., 2., 0.], # (1, 2, 10, 8)\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0.]],\n\n [[0., 0., 1., 1., 3., 2., 2., 0.],\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n 
[6., 6., 13., 7., 15., 8., 8., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"],\n strides=[3, 2],\n output_shape=[10, 8])\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_output_shape')\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"],\n strides=[3, 2],\n output_padding=[1, 1])\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_pad')\n\nnode = onnx.helper.make_node(\n 'ConvTranspose', ['X', 'W'], ['Y'],\n name='test',\n strides=[3, 2],\n output_shape=[10, 8],\n kernel_shape=[3, 3],\n output_padding=[1, 1]\n)\nexpect(node, inputs=[x, W], outputs=[y],\n name='test_convtranspose_kernel_shape')", + "summary": "convtranspose_attributes" + }, + { + "code": "x = np.array([[[[3., 8., 1.], # (1, 1, 3, 3)\n [9., 5., 7.],\n [3., 2., 6.]]]]).astype(np.float32)\nW = np.array([[[[7., 2.], # (1, 1, 2, 2)\n [1., 9.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], dilations=[2, 2])\n\ny = np.array([[[[21., 56., 13., 16., 2.], # [1, 1, 5, 5]\n [63., 35., 67., 10., 14.],\n [24., 22., 76., 76., 21.],\n [9., 5., 88., 45., 63.],\n [3., 2., 33., 18., 54.]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_dilations')", + "summary": "convtranspose_dilations" + }, + { + "code": "x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)\n [3., 4., 5.],\n [6., 7., 8.]]]]).astype(np.float32)\n\nW = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"],\n strides=[3, 2],\n pads=[1, 2, 1, 2])\n\ny = np.array([[[[1., 1., 3.], # (1, 2, 7, 3)\n [1., 1., 3.],\n [7., 4., 9.],\n [7., 4., 9.],\n [7., 4., 9.],\n [13., 7., 15.],\n [13., 7., 15.]],\n\n [[1., 1., 3.],\n [1., 1., 3.],\n [7., 4., 9.],\n [7., 4., 9.],\n 
[7., 4., 9.],\n [13., 7., 15.],\n [13., 7., 15.]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_pads')", + "summary": "convtranspose_pads" + } + ], + "inputs": [ + { + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn)", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn), where (k1 x k2 x ... x kn) is the dimension of the kernel. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)", + "name": "W", + "type": "T" + }, + { + "description": "Optional 1D bias to be added to the convolution, has size of M.", + "name": "B", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. 
The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ConvTranspose", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "number of groups input channels and output channels are divided into.", + "name": "group", + "required": false, + "type": "int64" + }, + { + "description": "The shape of the convolution kernel. If not present, should be inferred from input W.", + "name": "kernel_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "Additional elements added to the side with higher coordinate indices in the output. Each padding value in \"output_padding\" must be less than the corresponding stride/dilation dimension. By default, this attribute is a zero vector. Note that this attribute doesn't directly affect the computed output values. It only controls the selection of the computed values, so changing this attribute only adds or removes output elements. 
If \"output_shape\" is explicitly provided, \"output_padding\" does not contribute additional size to \"output_shape\" but participates in the computation of the needed padding amount. This is also called adjs or adjustment in some frameworks.", + "name": "output_padding", + "required": false, + "type": "int64[]" + }, + { + "description": "The shape of the output can be explicitly set which will cause pads values to be auto generated. If output_shape is specified pads values are ignored. See doc for details for equations to generate pads", + "name": "output_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis. 
If not present, the stride defaults to 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Layer", + "description": "The convolution transpose operator consumes an input tensor and a filter,\nand computes the output.\n\nIf the pads parameter is provided the shape of the output is calculated via the following equation:\n\n output_shape[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - pads[start_i] - pads[end_i]\n\noutput_shape can also be explicitly specified in which case pads values are auto generated using these equations:\n\n total_padding[i] = stride[i] * (input_size[i] - 1) + output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - output_shape[i]\n If (auto_pads != SAME_UPPER): pads[start_i] = total_padding[i]/2; pads[end_i] = total_padding[i] - (total_padding[i]/2)\n Else: pads[start_i] = total_padding[i] - (total_padding[i]/2); pads[end_i] = (total_padding[i]/2).\n\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)\n [3., 4., 5.],\n [6., 7., 8.]]]]).astype(np.float32)\n\nW = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array([[[[0., 1., 3., 3., 2.], # (1, 2, 5, 5)\n [3., 8., 15., 12., 7.],\n [9., 21., 36., 27., 15.],\n [9., 20., 33., 24., 13.],\n [6., 13., 21., 15., 8.]],\n\n [[0., 1., 3., 3., 2.],\n [3., 8., 15., 12., 7.],\n [9., 21., 36., 27., 15.],\n [9., 20., 33., 24., 13.],\n [6., 13., 21., 15., 8.]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose')", + "summary": "convtranspose" + }, + { + "code": "x = np.array([[[0., 1., 2.]]]).astype(np.float32) # (1, 1, 3)\n\nW = np.array([[[1., 1., 1.], # (1, 2, 3)\n [1., 1., 1.]]]).astype(np.float32)\n\nnode = 
onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array([[[0., 1., 3., 3., 2.], # (1, 2, 5)\n [0., 1., 3., 3., 2.]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_1d')", + "summary": "convtranspose_1d" + }, + { + "code": "x = np.array([[[[[0., 1., 2., 3., 4.], # (1, 1, 3, 4, 5)\n [5., 6., 7., 8., 9.],\n [10., 11., 12., 13., 14.],\n [15., 16., 17., 18., 19.]],\n [[20., 21., 22., 23., 24.],\n [25., 26., 27., 28., 29.],\n [30., 31., 32., 33., 34.],\n [35., 36., 37., 38., 39.]],\n [[40., 41., 42., 43., 44.],\n [45., 46., 47., 48., 49.],\n [50., 51., 52., 53., 54.],\n [55., 56., 57., 58., 59.]]]]]).astype(np.float32)\n\nW = np.array([[[[[1., 1., 1.], # (1, 2, 3, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]],\n [[[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"])\n\ny = np.array([[[[[0., 1., 3., 6., 9., 7., 4.], # (1, 2, 5, 6, 7)\n [5., 12., 21., 27., 33., 24., 13.],\n [15., 33., 54., 63., 72., 51., 27.],\n [30., 63., 99., 108., 117., 81., 42.],\n [25., 52., 81., 87., 93., 64., 33.],\n [15., 31., 48., 51., 54., 37., 19.]],\n\n [[20., 42., 66., 72., 78., 54., 28.],\n [50., 104., 162., 174., 186., 128., 66.],\n [90., 186., 288., 306., 324., 222., 114.],\n [120., 246., 378., 396., 414., 282., 144.],\n [90., 184., 282., 294., 306., 208., 106.],\n [50., 102., 156., 162., 168., 114., 58.]],\n\n [[60., 123., 189., 198., 207., 141., 72.],\n [135., 276., 423., 441., 459., 312., 159.],\n [225., 459., 702., 729., 756., 513., 261.],\n [270., 549., 837., 864., 891., 603., 306.],\n [195., 396., 603., 621., 639., 432., 219.],\n [105., 213., 324., 333., 342., 231., 117.]],\n\n [[60., 122., 186., 192., 198., 134., 68.],\n [130., 264., 
402., 414., 426., 288., 146.],\n [210., 426., 648., 666., 684., 462., 234.],\n [240., 486., 738., 756., 774., 522., 264.],\n [170., 344., 522., 534., 546., 368., 186.],\n [90., 182., 276., 282., 288., 194., 98.]],\n\n [[40., 81., 123., 126., 129., 87., 44.],\n [85., 172., 261., 267., 273., 184., 93.],\n [135., 273., 414., 423., 432., 291., 147.],\n [150., 303., 459., 468., 477., 321., 162.],\n [105., 212., 321., 327., 333., 224., 113.],\n [55., 111., 168., 171., 174., 117., 59.]]],\n\n [[[0., 1., 3., 6., 9., 7., 4.],\n [5., 12., 21., 27., 33., 24., 13.],\n [15., 33., 54., 63., 72., 51., 27.],\n [30., 63., 99., 108., 117., 81., 42.],\n [25., 52., 81., 87., 93., 64., 33.],\n [15., 31., 48., 51., 54., 37., 19.]],\n\n [[20., 42., 66., 72., 78., 54., 28.],\n [50., 104., 162., 174., 186., 128., 66.],\n [90., 186., 288., 306., 324., 222., 114.],\n [120., 246., 378., 396., 414., 282., 144.],\n [90., 184., 282., 294., 306., 208., 106.],\n [50., 102., 156., 162., 168., 114., 58.]],\n\n [[60., 123., 189., 198., 207., 141., 72.],\n [135., 276., 423., 441., 459., 312., 159.],\n [225., 459., 702., 729., 756., 513., 261.],\n [270., 549., 837., 864., 891., 603., 306.],\n [195., 396., 603., 621., 639., 432., 219.],\n [105., 213., 324., 333., 342., 231., 117.]],\n\n [[60., 122., 186., 192., 198., 134., 68.],\n [130., 264., 402., 414., 426., 288., 146.],\n [210., 426., 648., 666., 684., 462., 234.],\n [240., 486., 738., 756., 774., 522., 264.],\n [170., 344., 522., 534., 546., 368., 186.],\n [90., 182., 276., 282., 288., 194., 98.]],\n\n [[40., 81., 123., 126., 129., 87., 44.],\n [85., 172., 261., 267., 273., 184., 93.],\n [135., 273., 414., 423., 432., 291., 147.],\n [150., 303., 459., 468., 477., 321., 162.],\n [105., 212., 321., 327., 333., 224., 113.],\n [55., 111., 168., 171., 174., 117., 59.]]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_3d')", + "summary": "convtranspose_3d" + }, + { + "code": "x = np.array([[[[0., 1., 2.], # (1, 
1, 3, 3)\n [3., 4., 5.],\n [6., 7., 8.]]]]).astype(np.float32)\n\nW = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\ny = np.array([[[[0., 0., 1., 1., 3., 2., 2., 0.], # (1, 2, 10, 8)\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0.]],\n\n [[0., 0., 1., 1., 3., 2., 2., 0.],\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [0., 0., 1., 1., 3., 2., 2., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [3., 3., 7., 4., 9., 5., 5., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [6., 6., 13., 7., 15., 8., 8., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"],\n strides=[3, 2],\n output_shape=[10, 8])\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_output_shape')\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"],\n strides=[3, 2],\n output_padding=[1, 1])\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_pad')\n\nnode = onnx.helper.make_node(\n 'ConvTranspose', ['X', 'W'], ['Y'],\n name='test',\n strides=[3, 2],\n output_shape=[10, 8],\n kernel_shape=[3, 3],\n output_padding=[1, 1]\n)\nexpect(node, inputs=[x, W], outputs=[y],\n name='test_convtranspose_kernel_shape')", + "summary": "convtranspose_attributes" + }, + { + "code": "x = np.array([[[[3., 8., 1.], # (1, 1, 3, 3)\n [9., 5., 7.],\n [3., 2., 6.]]]]).astype(np.float32)\nW = np.array([[[[7., 2.], # (1, 1, 2, 2)\n [1., 9.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"], dilations=[2, 2])\n\ny = 
np.array([[[[21., 56., 13., 16., 2.], # [1, 1, 5, 5]\n [63., 35., 67., 10., 14.],\n [24., 22., 76., 76., 21.],\n [9., 5., 88., 45., 63.],\n [3., 2., 33., 18., 54.]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_dilations')", + "summary": "convtranspose_dilations" + }, + { + "code": "x = np.array([[[[0., 1., 2.], # (1, 1, 3, 3)\n [3., 4., 5.],\n [6., 7., 8.]]]]).astype(np.float32)\n\nW = np.array([[[[1., 1., 1.], # (1, 2, 3, 3)\n [1., 1., 1.],\n [1., 1., 1.]],\n [[1., 1., 1.],\n [1., 1., 1.],\n [1., 1., 1.]]]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\"ConvTranspose\", [\"X\", \"W\"], [\"Y\"],\n strides=[3, 2],\n pads=[1, 2, 1, 2])\n\ny = np.array([[[[1., 1., 3.], # (1, 2, 7, 3)\n [1., 1., 3.],\n [7., 4., 9.],\n [7., 4., 9.],\n [7., 4., 9.],\n [13., 7., 15.],\n [13., 7., 15.]],\n\n [[1., 1., 3.],\n [1., 1., 3.],\n [7., 4., 9.],\n [7., 4., 9.],\n [7., 4., 9.],\n [13., 7., 15.],\n [13., 7., 15.]]]]).astype(np.float32)\n\nexpect(node, inputs=[x, W], outputs=[y], name='test_convtranspose_pads')", + "summary": "convtranspose_pads" + } + ], + "inputs": [ + { + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn)", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor that will be used in the convolutions; has size (C x M/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the weight shape will be (C x M/group x k1 x k2 x ... x kn), where (k1 x k2 x ... x kn) is the dimension of the kernel. 
The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)", + "name": "W", + "type": "T" + }, + { + "description": "Optional 1D bias to be added to the convolution, has size of M.", + "name": "B", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, pad lengths and group count. The number of channels in the output should be equal to W.shape[1] * group (assuming zero based indices of the shape array)", + "name": "Y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Cos", + "schema": { + "description": "Calculates the cosine of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Cos',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.cos(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_cos_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.cos(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_cos')", + "summary": "cos" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The cosine of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + 
"tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Cosh", + "schema": { + "description": "Calculates the hyperbolic cosine of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Cosh',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.cosh(x) # expected output [1.54308069, 1., 1.54308069]\nexpect(node, inputs=[x], outputs=[y],\n name='test_cosh_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.cosh(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_cosh')", + "summary": "cosh" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The hyperbolic cosine values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "CumSum", + "schema": { + "attributes": [ + { + "description": "If set to 1 will return exclusive sum in which the top element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. 
Otherwise, it would be the sum of the first j elements.", + "name": "exclusive", + "required": false, + "type": "int64" + }, + { + "description": "If set to 1 will perform the sums in reverse direction.", + "name": "reverse", + "required": false, + "type": "int64" + } + ], + "description": "Performs cumulative sum of the input elements along the given axis.\nBy default, it will do the sum inclusively meaning the first element is copied as is.\nThrough an `exclusive` attribute, this behavior can change to exclude the first element.\nIt can also perform summation in the opposite direction of the axis. For that, set `reverse` attribute to 1.\n\nExample:\n```\ninput_x = [1, 2, 3]\naxis=0\noutput = [1, 3, 6]\nexclusive=1\noutput = [0, 1, 3]\nexclusive=0\nreverse=1\noutput = [6, 5, 3]\nexclusive=1\nreverse=1\noutput = [5, 3, 0]\n```\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y']\n)\nx = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\naxis = np.array([0]).astype(np.int32)\ny = np.array([1., 3., 6., 10., 15.]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d')", + "summary": "cumsum_1d" + }, + { + "code": "node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n exclusive=1\n)\nx = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\naxis = np.array([0]).astype(np.int32)\ny = np.array([0., 1., 3., 6., 10.]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_exclusive')", + "summary": "cumsum_1d_exclusive" + }, + { + "code": "node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n reverse=1\n)\nx = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\naxis = np.array([0]).astype(np.int32)\ny = np.array([15., 14., 12., 9., 5.]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_reverse')", + "summary": 
"cumsum_1d_reverse" + }, + { + "code": "node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n exclusive=1,\n reverse=1\n)\nx = np.array([1., 2., 3., 4., 5.]).astype(np.float64)\naxis = np.array([0]).astype(np.int32)\ny = np.array([14., 12., 9., 5., 0.]).astype(np.float64)\nexpect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_1d_reverse_exclusive')", + "summary": "cumsum_1d_reverse_exclusive" + }, + { + "code": "node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n)\nx = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\naxis = np.array([0]).astype(np.int32)\ny = np.array([1., 2., 3., 5., 7., 9.]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_axis_0')", + "summary": "cumsum_2d_axis_0" + }, + { + "code": "node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n)\nx = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\naxis = np.array([1]).astype(np.int32)\ny = np.array([1., 3., 6., 4., 9., 15.]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_axis_1')", + "summary": "cumsum_2d_axis_1" + }, + { + "code": "node = onnx.helper.make_node(\n 'CumSum',\n inputs=['x', 'axis'],\n outputs=['y'],\n)\nx = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3))\naxis = np.array([-1]).astype(np.int32)\ny = np.array([1., 3., 6., 4., 9., 15.]).astype(np.float64).reshape((2, 3))\nexpect(node, inputs=[x, axis], outputs=[y],\n name='test_cumsum_2d_negative_axis')", + "summary": "cumsum_2d_negative_axis" + } + ], + "inputs": [ + { + "description": "An input tensor that is to be processed.", + "name": "x", + "type": "T" + }, + { + "description": "(Optional) A 0-D tensor. Must be in the range [-rank(x), rank(x)-1]. 
Negative value means counting dimensions from the back.", + "name": "axis", + "type": "T2" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of the same type as 'x' with cumulative sums of the x's elements", + "name": "y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float)", + "tensor(double)" + ], + "description": "Input can be of any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "axis tensor can be int32 or int64 only", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "DepthToSpace", + "schema": { + "attributes": [ + { + "description": "Blocks of [blocksize, blocksize] are moved.", + "name": "blocksize", + "required": true, + "type": "int64" + } + ], + "description": "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. 
More specifically, this op outputs a copy of\nthe input tensor where values from the depth dimension are moved in spatial blocks to the height\nand width dimensions.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'DepthToSpace',\n inputs=['x'],\n outputs=['y'],\n blocksize=2,\n mode='CRD'\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array([[[[0., 1., 2.],\n [3., 4., 5.]],\n [[9., 10., 11.],\n [12., 13., 14.]],\n [[18., 19., 20.],\n [21., 22., 23.]],\n [[27., 28., 29.],\n [30., 31., 32.]],\n [[36., 37., 38.],\n [39., 40., 41.]],\n [[45., 46., 47.],\n [48., 49., 50.]],\n [[54., 55., 56.],\n [57., 58., 59.]],\n [[63., 64., 65.],\n [66., 67., 68.]]]]).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array([[[[0., 9., 1., 10., 2., 11.],\n [18., 27., 19., 28., 20., 29.],\n [3., 12., 4., 13., 5., 14.],\n [21., 30., 22., 31., 23., 32.]],\n [[36., 45., 37., 46., 38., 47.],\n [54., 63., 55., 64., 56., 65.],\n [39., 48., 40., 49., 41., 50.],\n [57., 66., 58., 67., 59., 68.]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_depthtospace_crd_mode_example')", + "summary": "crd_mode_example" + }, + { + "code": "node = onnx.helper.make_node(\n 'DepthToSpace',\n inputs=['x'],\n outputs=['y'],\n blocksize=2,\n mode='DCR'\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array([[[[0., 1., 2.],\n [3., 4., 5.]],\n [[9., 10., 11.],\n [12., 13., 14.]],\n [[18., 19., 20.],\n [21., 22., 23.]],\n [[27., 28., 29.],\n [30., 31., 32.]],\n [[36., 37., 38.],\n [39., 40., 41.]],\n [[45., 46., 47.],\n [48., 49., 50.]],\n [[54., 55., 56.],\n [57., 58., 59.]],\n [[63., 64., 65.],\n [66., 67., 68.]]]]).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array([[[[0., 18., 1., 19., 2., 20.],\n [36., 54., 37., 55., 38., 56.],\n [3., 21., 4., 22., 5., 23.],\n [39., 57., 40., 58., 41., 59.]],\n [[9., 27., 10., 28., 11., 29.],\n [45., 63., 46., 64., 47., 65.],\n [12., 30., 13., 31., 14., 32.],\n [48., 66., 49., 67., 50., 
68.]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_depthtospace_example')", + "summary": "default_mode_example" + } + ], + "inputs": [ + { + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "DepthToSpace", + "schema": { + "attributes": [ + { + "description": "Blocks of [blocksize, blocksize] are moved.", + "name": "blocksize", + "required": true, + "type": "int64" + }, + { + "default": "DCR", + "description": "DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order.", + "name": "mode", + "required": false, + "type": "string" + } + ], + "description": "DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of\nthe input tensor where values from the depth dimension are moved in spatial blocks to the height\nand width dimensions. By default, `mode` = `DCR`.\nIn the DCR mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: depth, column, and then row. 
The output y is computed from the input x as below:\n\nb, c, h, w = x.shape\n\ntmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])\n\ntmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])\n\ny = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])\n\n\nIn the CRD mode, elements along the depth dimension from the input tensor are rearranged in the\nfollowing order: column, row, and the depth. The output y is computed from the input x as below:\n\nb, c, h, w = x.shape\n\ntmp = np.reshape(x, [b, c // (blocksize ** 2), blocksize, blocksize, h, w])\n\ntmp = np.transpose(tmp, [0, 1, 4, 2, 5, 3])\n\ny = np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'DepthToSpace',\n inputs=['x'],\n outputs=['y'],\n blocksize=2,\n mode='CRD'\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array([[[[0., 1., 2.],\n [3., 4., 5.]],\n [[9., 10., 11.],\n [12., 13., 14.]],\n [[18., 19., 20.],\n [21., 22., 23.]],\n [[27., 28., 29.],\n [30., 31., 32.]],\n [[36., 37., 38.],\n [39., 40., 41.]],\n [[45., 46., 47.],\n [48., 49., 50.]],\n [[54., 55., 56.],\n [57., 58., 59.]],\n [[63., 64., 65.],\n [66., 67., 68.]]]]).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array([[[[0., 9., 1., 10., 2., 11.],\n [18., 27., 19., 28., 20., 29.],\n [3., 12., 4., 13., 5., 14.],\n [21., 30., 22., 31., 23., 32.]],\n [[36., 45., 37., 46., 38., 47.],\n [54., 63., 55., 64., 56., 65.],\n [39., 48., 40., 49., 41., 50.],\n [57., 66., 58., 67., 59., 68.]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_depthtospace_crd_mode_example')", + "summary": "crd_mode_example" + }, + { + "code": "node = onnx.helper.make_node(\n 'DepthToSpace',\n inputs=['x'],\n outputs=['y'],\n blocksize=2,\n mode='DCR'\n)\n\n# (1, 8, 2, 3) input tensor\nx = np.array([[[[0., 1., 2.],\n [3., 4., 5.]],\n [[9., 10., 11.],\n [12., 13., 14.]],\n [[18., 19., 20.],\n [21., 22., 
23.]],\n [[27., 28., 29.],\n [30., 31., 32.]],\n [[36., 37., 38.],\n [39., 40., 41.]],\n [[45., 46., 47.],\n [48., 49., 50.]],\n [[54., 55., 56.],\n [57., 58., 59.]],\n [[63., 64., 65.],\n [66., 67., 68.]]]]).astype(np.float32)\n\n# (1, 2, 4, 6) output tensor\ny = np.array([[[[0., 18., 1., 19., 2., 20.],\n [36., 54., 37., 55., 38., 56.],\n [3., 21., 4., 22., 5., 23.],\n [39., 57., 40., 58., 41., 59.]],\n [[9., 27., 10., 28., 11., 29.],\n [45., 63., 46., 64., 47., 65.],\n [12., 30., 13., 31., 14., 32.],\n [48., 66., 49., 67., 50., 68.]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_depthtospace_example')", + "summary": "default_mode_example" + } + ], + "inputs": [ + { + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of [N, C/(blocksize * blocksize), H * blocksize, W * blocksize].", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "DequantizeLinear", + "schema": { + "description": "The linear dequantization operator. It consumes a quantized tensor, a scale, a zero point to compute the full precision tensor.\nThe dequantization formula is y = (x - x_zero_point) * x_scale. 'x_scale' and 'x_zero_point' must have same shape.\n'x_zero_point' and 'x' must have same type. 
'x' and 'y' must have same shape. In the case of dequantizing int32,\nthere's no zero point (zero point is supposed to be 0).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node('DequantizeLinear',\n inputs=['x', 'x_scale', 'x_zero_point'],\n outputs=['y'],)\n\n# scalar zero point and scale\nx = np.array([0, 3, 128, 255]).astype(np.uint8)\nx_scale = np.float32(2)\nx_zero_point = np.uint8(128)\ny = np.array([-256, -250, 0, 254], dtype=np.float32)\n\nexpect(node, inputs=[x, x_scale, x_zero_point], outputs=[y],\n name='test_dequantizelinear')", + "summary": "dequantizelinear" + } + ], + "inputs": [ + { + "description": "N-D quantized input tensor to be de-quantized.", + "name": "x", + "type": "T" + }, + { + "description": "Scale for input 'x'. It's a scalar, which means a per-tensor/layer quantization.", + "name": "x_scale", + "type": "tensor(float)" + }, + { + "description": "Zero point for input 'x'. It's a scalar, which means a per-tensor/layer quantization. It's optional. 0 is the default value when it's not specified.", + "name": "x_zero_point", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "N-D full precision output tensor. 
It has same shape as input 'x'.", + "name": "y", + "type": "tensor(float)" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)", + "tensor(int32)" + ], + "description": "Constrain 'x_zero_point' and 'x' to 8-bit/32-bit integer tensor.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Det", + "schema": { + "description": "Det calculates determinant of a square matrix or batches of square matrices.\nDet takes one input tensor of shape `[*, M, M]`, where `*` is zero or more batch dimensions,\nand the inner-most 2 dimensions form square matrices.\nThe output is a tensor of shape `[*]`, containing the determinants of all input submatrices.\ne.g., When the input is 2-D, the output is a scalar(shape is empty: `[]`).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Det',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.arange(4).reshape(2, 2).astype(np.float32)\ny = np.linalg.det(x) # expect -2\nexpect(node, inputs=[x], outputs=[y],\n name='test_det_2d')", + "summary": "2d" + }, + { + "code": "node = onnx.helper.make_node(\n 'Det',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]).astype(np.float32)\ny = np.linalg.det(x) # expect array([-2., -3., -8.])\nexpect(node, inputs=[x], outputs=[y],\n name='test_det_nd')", + "summary": "nd" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to floating-point tensors.", + "type_param_str": "T" + } + ] + } + }, 
+ { + "name": "DictVectorizer", + "schema": { + "attributes": [ + { + "description": "An integer vocabulary array.
    One and only one of the vocabularies must be defined.", + "name": "int64_vocabulary", + "required": false, + "type": "int64[]" + }, + { + "description": "A string vocabulary array.
    One and only one of the vocabularies must be defined.", + "name": "string_vocabulary", + "required": false, + "type": "string[]" + } + ], + "description": "Uses an index mapping to convert a dictionary to an array.
    \n Given a dictionary, each key is looked up in the vocabulary attribute corresponding to\n the key type. The index into the vocabulary array at which the key is found is then\n used to index the output 1-D tensor 'Y' and insert into it the value found in the dictionary 'X'.
    \n The key type of the input map must correspond to the element type of the defined vocabulary attribute.\n Therefore, the output array will be equal in length to the index mapping vector parameter.\n All keys in the input dictionary must be present in the index mapping vector.\n For each item in the input dictionary, insert its value in the output array.\n Any keys not present in the input dictionary, will be zero in the output array.
    \n For example: if the ``string_vocabulary`` parameter is set to ``[\"a\", \"c\", \"b\", \"z\"]``,\n then an input of ``{\"a\": 4, \"c\": 8}`` will produce an output of ``[4, 8, 0, 0]``.\n ", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "A dictionary.", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "A 1-D tensor holding values from the input dictionary.", + "name": "Y", + "type": "T2" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "map(string, int64)", + "map(int64, string)", + "map(int64, float)", + "map(int64, double)", + "map(string, float)", + "map(string, double)" + ], + "description": "The input must be a map from strings or integers to either strings or a numeric type. The key and value types cannot be the same.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int64)", + "tensor(float)", + "tensor(double)", + "tensor(string)" + ], + "description": "The output will be a tensor of the value type of the input map. It's shape will be [1,C], where C is the length of the input dictionary.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Div", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Performs element-wise binary division (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. 
When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Div',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div')", + "summary": "div" + }, + { + "code": "node = onnx.helper.make_node(\n 'Div',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div_bcast')", + "summary": "div_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Div", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "description": "Performs element-wise binary division (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. 
B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Div',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div')", + "summary": "div" + }, + { + "code": "node = onnx.helper.make_node(\n 'Div',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div_bcast')", + "summary": "div_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Div", + "schema": { + "description": "Performs element-wise binary division (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Div',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([3, 4]).astype(np.float32)\ny = np.array([1, 2]).astype(np.float32)\nz = x / y # expected output [3., 2.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(3, 4, 5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div')", + "summary": "div" + }, + { + "code": "node = onnx.helper.make_node(\n 'Div',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.rand(5).astype(np.float32) + 1.0\nz = x / y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_div_bcast')", + "summary": "div_broadcast" + } + ], + "inputs": [ + { + "description": "First operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand.", + "name": "B", + "type": "T" + } + ], + 
"max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same element type as two inputs", + "name": "C", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Dropout", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + }, + { + "description": "(int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X.", + "name": "is_test", + "required": false, + "type": "int64" + }, + { + "default": 0.5, + "description": "(float, default 0.5) the ratio of random dropout", + "name": "ratio", + "required": false, + "type": "float32" + } + ], + "category": "Dropout", + "description": "Dropout takes one input data (Tensor) and produces two Tensor outputs,\noutput (Tensor) and mask (Tensor). Depending on whether it is in\ntest mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. 
Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name='test_dropout_default')", + "summary": "default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name='test_dropout_default_mask')", + "summary": "default_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(node, inputs=[x, r], outputs=[y, z], name='test_dropout_default_mask_ratio')", + "summary": "default_mask_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_default_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "default_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name='test_dropout_default_ratio')", + "summary": "default_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n ratio=.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(node, inputs=[x], 
outputs=[y],\n name='test_dropout_random_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "random_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout')", + "summary": "training" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_default')", + "summary": "training_default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_default_mask')", + "summary": "training_default_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_zero_ratio')", + "summary": "training_default_zero_ratio" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, 
return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_zero_ratio_mask')", + "summary": "training_default_zero_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_mask')", + "summary": "training_ratio_mask" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output.", + "name": "output", + "type": "T" + }, + { + "description": "The output mask. If is_test is nonzero, this output is not filled.", + "name": "mask", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 2", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Dropout", + "schema": { + "attributes": [ + { + "description": "(int, default 0) if nonzero, run dropout in test mode where the output is simply Y = X.", + "name": "is_test", + "required": false, + "type": "int64" + }, + { + "default": 0.5, + "description": "(float, default 0.5) the ratio of random dropout", + "name": "ratio", + "required": false, + "type": "float32" + } + ], + "category": "Dropout", + "description": "Dropout takes one input data (Tensor) and produces two Tensor outputs,\noutput (Tensor) and mask (Tensor). Depending on whether it is in\ntest mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. 
Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name='test_dropout_default')", + "summary": "default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name='test_dropout_default_mask')", + "summary": "default_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(node, inputs=[x, r], outputs=[y, z], name='test_dropout_default_mask_ratio')", + "summary": "default_mask_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_default_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "default_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name='test_dropout_default_ratio')", + "summary": "default_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n ratio=.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(node, inputs=[x], 
outputs=[y],\n name='test_dropout_random_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "random_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout')", + "summary": "training" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_default')", + "summary": "training_default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_default_mask')", + "summary": "training_default_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_zero_ratio')", + "summary": "training_default_zero_ratio" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, 
return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_zero_ratio_mask')", + "summary": "training_default_zero_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_mask')", + "summary": "training_ratio_mask" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output.", + "name": "output", + "type": "T" + }, + { + "description": "The output mask. If is_test is nonzero, this output is not filled.", + "name": "mask", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 2", + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Dropout", + "schema": { + "attributes": [ + { + "default": 0.5, + "description": "The ratio of random dropout", + "name": "ratio", + "required": false, + "type": "float32" + } + ], + "category": "Dropout", + "description": "Dropout takes one input data (Tensor) and produces two Tensor outputs,\noutput (Tensor) and mask (Tensor). Depending on whether it is in\ntest mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\nThis operator has **optional** inputs/outputs. 
See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name='test_dropout_default')", + "summary": "default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name='test_dropout_default_mask')", + "summary": "default_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(node, inputs=[x, r], outputs=[y, z], name='test_dropout_default_mask_ratio')", + "summary": "default_mask_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_default_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "default_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name='test_dropout_default_ratio')", + "summary": 
"default_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n ratio=.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_random_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "random_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout')", + "summary": "training" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_default')", + "summary": "training_default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_default_mask')", + "summary": "training_default_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_zero_ratio')", + "summary": "training_default_zero_ratio" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 
'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_zero_ratio_mask')", + "summary": "training_default_zero_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_mask')", + "summary": "training_ratio_mask" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output.", + "name": "output", + "type": "T" + }, + { + "description": "The output mask.", + "name": "mask", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 2", + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Dropout", + "schema": { + "attributes": [ + { + "default": 0.5, + "description": "The ratio of random dropout", + "name": "ratio", + "required": false, + "type": "float32" + } + ], + "category": "Dropout", + "description": "Dropout takes one input floating tensor and produces two tensor outputs,\noutput (floating tensor) and mask (`Tensor`). Depending on whether it is\nin test mode or not, the output Y will either be a random dropout, or a simple\ncopy of the input. 
Note that our implementation of Dropout does scaling in\nthe training phase, so during testing nothing needs to be done.\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name='test_dropout_default')", + "summary": "default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name='test_dropout_default_mask')", + "summary": "default_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(node, inputs=[x, r], outputs=[y, z], name='test_dropout_default_mask_ratio')", + "summary": "default_mask_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_default_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "default_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y'],\n seed=seed\n)\n\nr = 
np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name='test_dropout_default_ratio')", + "summary": "default_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n ratio=.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_random_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "random_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout')", + "summary": "training" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_default')", + "summary": "training_default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_default_mask')", + "summary": "training_default_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], 
outputs=[y], name='test_training_dropout_zero_ratio')", + "summary": "training_default_zero_ratio" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_zero_ratio_mask')", + "summary": "training_default_zero_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_mask')", + "summary": "training_ratio_mask" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output.", + "name": "output", + "type": "T" + }, + { + "description": "The output mask.", + "name": "mask", + "option": "optional", + "type": "T1" + } + ], + "outputs_range": "1 - 2", + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrain output mask types to boolean tensors.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Dropout", + "schema": { + "attributes": [ + { + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one.", + "name": "seed", + "required": 
false, + "type": "int64" + } + ], + "category": "Dropout", + "description": "Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). It produces two tensor outputs,\noutput (floating-point tensor) and mask (optional `Tensor`). If `training_mode` is true then the output Y will be a random dropout;\nNote that this Dropout scales the masked input data by the following equation, so to convert the trained model into inference mode,\nthe user can simply not pass `training_mode` input or set it to false.\n```\noutput = scale * data * mask,\n```\nwhere\n```\nscale = 1. / (1. - ratio).\n```\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x)\nexpect(node, inputs=[x], outputs=[y], name='test_dropout_default')", + "summary": "default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, return_mask=True)\nexpect(node, inputs=[x], outputs=[y, z], name='test_dropout_default_mask')", + "summary": "default_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny, z = dropout(x, r, return_mask=True)\nexpect(node, 
inputs=[x, r], outputs=[y, z], name='test_dropout_default_mask_ratio')", + "summary": "default_mask_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_default_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "default_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r'],\n outputs=['y'],\n seed=seed\n)\n\nr = np.float32(0.1)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = dropout(x, r)\nexpect(node, inputs=[x, r], outputs=[y], name='test_dropout_default_ratio')", + "summary": "default_ratio" + }, + { + "code": "node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x'],\n outputs=['y'],\n ratio=.2,\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x\nexpect(node, inputs=[x], outputs=[y],\n name='test_dropout_random_old', opset_imports=[helper.make_opsetid(\"\", 11)])", + "summary": "random_old" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout')", + "summary": "training" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_default')", + "summary": "training_default" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 
5).astype(np.float32)\nr = np.float32(0.5)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_default_mask')", + "summary": "training_default_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny = dropout(x, r, training_mode=t)\nexpect(node, inputs=[x, r, t], outputs=[y], name='test_training_dropout_zero_ratio')", + "summary": "training_default_zero_ratio" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.0)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_zero_ratio_mask')", + "summary": "training_default_zero_ratio_mask" + }, + { + "code": "seed = np.int64(0)\nnode = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nr = np.float32(0.75)\nt = np.bool_(True)\ny, z = dropout(x, r, training_mode=t, return_mask=True)\nexpect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_mask')", + "summary": "training_ratio_mask" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "data", + "type": "T" + }, + { + "description": "The ratio of random dropout, with value in [0, 1). If this input was not set, or if it was set to 0, the output would be a simple copy of the input. If it's non-zero, output will be a random dropout of the scaled input, which is typically the case during training. 
It is an optional value, if not specified it will default to 0.5.", + "name": "ratio", + "option": "optional", + "type": "T1" + }, + { + "description": "If set to true then it indicates dropout is being used for training. It is an optional value hence unless specified explicitly, it is false. If it is false, ratio is ignored and the operation mimics inference mode where nothing will be dropped from the input data and if mask is requested as output it will contain all ones.", + "name": "training_mode", + "option": "optional", + "type": "T2" + } + ], + "inputs_range": "1 - 3", + "max_input": 3, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output.", + "name": "output", + "type": "T" + }, + { + "description": "The output mask.", + "name": "mask", + "option": "optional", + "type": "T2" + } + ], + "outputs_range": "1 - 2", + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input 'ratio' types to float tensors.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrain output 'mask' types to boolean tensors.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "DynamicQuantizeLinear", + "schema": { + "description": "A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion of FP32 Input data.\nOutputs Scale, ZeroPoint and Quantized Input for a given FP32 Input.\nScale is calculated as:\n```\n y_scale = (max(x) - min(x))/(qmax - qmin)\n * where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n * data range is adjusted to include 0.\n```\nZero point is calculated 
as:\n```\nintermediate_zero_point = qmin - min(x)/y_scale\ny_zero_point = cast(round(saturate(itermediate_zero_point)))\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n```\nData quantization formula is:\n```\ny = saturate (round (x / y_scale) + y_zero_point)\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node('DynamicQuantizeLinear',\n inputs=['x'],\n outputs=['y', 'y_scale', 'y_zero_point'],\n)\n\n# expected scale 0.0196078438 and zero point 153\nX = np.array([0, 2, -3, -2.5, 1.34, 0.5]).astype(np.float32)\nx_min = np.minimum(0, np.min(X))\nx_max = np.maximum(0, np.max(X))\nY_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255]\nY_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8)\nY = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8)\n\nexpect(node, inputs=[X], outputs=[Y, Y_Scale, Y_ZeroPoint],\n name='test_dynamicquantizelinear')\n\n# expected scale 0.0156862754 and zero point 255\nX = np.array([-1.0, -2.1, -1.3, -2.5, -3.34, -4.0]).astype(np.float32)\nx_min = np.minimum(0, np.min(X))\nx_max = np.maximum(0, np.max(X))\nY_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255]\nY_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8)\nY = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8)\n\nexpect(node, inputs=[X], outputs=[Y, Y_Scale, Y_ZeroPoint],\n name='test_dynamicquantizelinear_max_adjusted')\n\nX = np.array([1, 2.1, 1.3, 2.5,\n 3.34, 4.0, 1.5, 2.6,\n 3.9, 4.0, 3.0, 2.345]).astype(np.float32).reshape((3, 4))\n\n# expected scale 0.0156862754 
and zero point 0\nx_min = np.minimum(0, np.min(X))\nx_max = np.maximum(0, np.max(X))\nY_Scale = np.float32((x_max - x_min) / (255 - 0)) # uint8 -> [0, 255]\nY_ZeroPoint = np.clip(round((0 - x_min) / Y_Scale), 0, 255).astype(np.uint8)\nY = np.clip(np.round(X / Y_Scale) + Y_ZeroPoint, 0, 255).astype(np.uint8)\n\nexpect(node, inputs=[X], outputs=[Y, Y_Scale, Y_ZeroPoint],\n name='test_dynamicquantizelinear_min_adjusted')", + "summary": "dynamicquantizelinear" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "x", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 3, + "min_input": 1, + "min_output": 3, + "outputs": [ + { + "description": "Quantized output tensor", + "name": "y", + "type": "T2" + }, + { + "description": "Output scale. It's a scalar, which means a per-tensor/layer quantization.", + "name": "y_scale", + "type": "tensor(float)" + }, + { + "description": "Output zero point. It's a scalar, which means a per-tensor/layer quantization.", + "name": "y_zero_point", + "type": "T2" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)" + ], + "description": "Constrain 'x' to float tensor.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(uint8)" + ], + "description": "Constrain 'y_zero_point' and 'y' to 8-bit unsigned integer tensor.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Einsum", + "schema": { + "attributes": [ + { + "description": "Einsum expression string.", + "name": "equation", + "required": true, + "type": "string" + } + ], + "description": "An einsum of the form ```term1, term2 -> output-term``` produces an output tensor using the following equation\n\n```output[output-term] = reduce-sum( input1[term1] * input2[term] )```\n\nwhere the reduce-sum performs a summation over all the indices occurring in in the input terms (term1, term2)\nthat do not occur in the output-term.\n\nThe Einsum operator evaluates algebraic 
tensor operations on a sequence of tensors, using the Einstein summation\nconvention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to\nan operand tensor, and the characters within the terms correspond to operands dimensions.\n\nThis sequence may be followed by \"->\" to separate the left and right hand side of the equation.\nIf the equation contains \"->\" followed by the right-hand side, the explicit (not classical) form of the Einstein\nsummation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases,\noutput indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the\nequation.\n\nWhen a dimension character is repeated in the left-hand side, it represents summation along the dimension.\n\nThe equation may contain ellipsis (\"...\") to enable broadcasting. Ellipsis must indicate a fixed number of dimensions.\nSpecifically, every occurrence of ellipsis in the equation must represent the same number of dimensions.\nThe right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the\nbeginning of the output. 
The equation string may contain space (U+0020) character.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "Eqn = '...ii ->...i'\nnode = onnx.helper.make_node(\n 'Einsum',\n inputs=['x'],\n outputs=['y'],\n equation=Eqn\n)\n\nX = np.random.randn(3, 5, 5)\nZ = einsum_reference_implementation(Eqn, (X,))\n\nexpect(node, inputs=[X], outputs=[Z], name='test_einsum_batch_diagonal')", + "summary": "einsum_batch_diagonal" + }, + { + "code": "Eqn = 'bij, bjk -> bik'\nnode = onnx.helper.make_node(\n 'Einsum',\n inputs=['x', 'y'],\n outputs=['z'],\n equation=Eqn\n)\n\nX = np.random.randn(5, 2, 3)\nY = np.random.randn(5, 3, 4)\nZ = einsum_reference_implementation(Eqn, (X, Y))\n\nexpect(node, inputs=[X, Y], outputs=[Z], name='test_einsum_batch_matmul')", + "summary": "einsum_batch_matmul" + }, + { + "code": "Eqn = 'i,i'\nnode = onnx.helper.make_node(\n 'Einsum',\n inputs=['x', 'y'],\n outputs=['z'],\n equation=Eqn\n)\n\nX = np.random.randn(5)\nY = np.random.randn(5)\nZ = einsum_reference_implementation(Eqn, (X, Y))\n\nexpect(node, inputs=[X, Y], outputs=[Z], name='test_einsum_inner_prod')", + "summary": "einsum_inner_prod" + }, + { + "code": "Eqn = 'ij->i'\nnode = onnx.helper.make_node(\n 'Einsum',\n inputs=['x'],\n outputs=['y'],\n equation=Eqn\n)\n\nX = np.random.randn(3, 4)\nZ = einsum_reference_implementation(Eqn, (X,))\n\nexpect(node, inputs=[X], outputs=[Z], name='test_einsum_sum')", + "summary": "einsum_sum" + }, + { + "code": "Eqn = 'ij->ji'\nnode = onnx.helper.make_node(\n 'Einsum',\n inputs=['x'],\n outputs=['y'],\n equation=Eqn\n)\n\nX = np.random.randn(3, 4)\nY = einsum_reference_implementation(Eqn, (X,))\n\nexpect(node, inputs=[X], outputs=[Y], name='test_einsum_transpose')", + "summary": "einsum_transpose" + } + ], + "inputs": [ + { + "description": "Operands", + "name": "Inputs", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + 
"description": "Output tensor", + "name": "Output", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numerical tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Elu", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Coefficient of ELU default to 1.0.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "category": "Activation", + "description": "Elu takes one input data (Tensor) and produces one output data\n(Tensor) where the function `f(x) = alpha * (exp(x) - 1.) 
for x <\n0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise.\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Elu',\n inputs=['x'],\n outputs=['y'],\n alpha=2.0\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-1.2642411, 0., 1.]\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_elu_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_elu')", + "summary": "elu" + }, + { + "code": "default_alpha = 1.0\nnode = onnx.helper.make_node(\n 'Elu',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha\nexpect(node, inputs=[x], outputs=[y],\n name='test_elu_default')", + "summary": "elu_default" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "1D input tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Elu", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Coefficient of ELU.", + "name": "alpha", + "required": false, + "type": "float32" + } + ], + "category": "Activation", + "description": "Elu takes one input data (Tensor) and produces one output data\n(Tensor) where the function `f(x) = alpha * (exp(x) - 1.) 
for x <\n0`, `f(x) = x for x >= 0`., is applied to the tensor elementwise.\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Elu',\n inputs=['x'],\n outputs=['y'],\n alpha=2.0\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-1.2642411, 0., 1.]\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_elu_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_elu')", + "summary": "elu" + }, + { + "code": "default_alpha = 1.0\nnode = onnx.helper.make_node(\n 'Elu',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha\nexpect(node, inputs=[x], outputs=[y],\n name='test_elu_default')", + "summary": "elu_default" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "1D input tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Equal", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "category": "Logic", + "description": "Returns the tensor resulted from performing the `equal` logical operation\nelementwise on the input tensors `A` and 
`B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Equal',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_equal')", + "summary": "equal" + }, + { + "code": "node = onnx.helper.make_node(\n 'Equal',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_equal_bcast')", + "summary": "equal_broadcast" + } + ], + "inputs": [ + { + "description": "Left input tensor for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Right input tensor for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)", + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrains input to integral tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Equal", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) 
broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Equal',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_equal')", + "summary": "equal" + }, + { + "code": "node = onnx.helper.make_node(\n 'Equal',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_equal_bcast')", + "summary": "equal_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)", + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrains input to integral tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Equal", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the 
doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Equal',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_equal')", + "summary": "equal" + }, + { + "code": "node = onnx.helper.make_node(\n 'Equal',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = (np.random.randn(3, 4, 5) * 10).astype(np.int32)\ny = (np.random.randn(5) * 10).astype(np.int32)\nz = np.equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_equal_bcast')", + "summary": "equal_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input types to all numeric tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Erf", + "schema": { + "description": "Computes the error function of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Erf',\n inputs=['x'],\n 
outputs=['y'],\n)\n\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\ny = np.vectorize(math.erf)(x).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_erf')", + "summary": "erf" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The error function of the input tensor computed element-wise. It has the same shape and type of the input.", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Exp", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Calculates the exponential of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Exp',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.exp(x) # expected output [0.36787945, 1., 2.71828175]\nexpect(node, inputs=[x], outputs=[y],\n name='test_exp_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.exp(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_exp')", + "summary": "exp" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The exponential of the input tensor computed 
element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Exp", + "schema": { + "description": "Calculates the exponential of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Exp',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.exp(x) # expected output [0.36787945, 1., 2.71828175]\nexpect(node, inputs=[x], outputs=[y],\n name='test_exp_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.exp(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_exp')", + "summary": "exp" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The exponential of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Expand", + "schema": { + "description": "Broadcast the input tensor following the given shape and the broadcast rule.\nThe broadcast rule is similar to numpy.array(input) * numpy.ones(shape):\nDimensions are right alignment;\nTwo corresponding dimension must have the same value, or one of them is equal to 1.\nAlso, this operator is similar to numpy.broadcast_to(input, shape),\nbut the major difference is numpy.broadcast_to() does not allow shape to be smaller than 
input.size().\nIt is possible that the output.shape is not equal to shape, when some dimensions in shape is equal to 1,\nor the shape.ndim < input.shape.ndim.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Expand',\n inputs=['data', 'new_shape'],\n outputs=['expanded'],\n)\nshape = [3, 1]\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[1.], [2.], [3.]]\nnew_shape = [2, 1, 6]\nexpanded = data * np.ones(new_shape, dtype=np.float32)\n#print(expanded)\n#[[[1., 1., 1., 1., 1., 1.],\n# [2., 2., 2., 2., 2., 2.],\n# [3., 3., 3., 3., 3., 3.]],\n#\n# [[1., 1., 1., 1., 1., 1.],\n# [2., 2., 2., 2., 2., 2.],\n# [3., 3., 3., 3., 3., 3.]]]\nnew_shape = np.array(new_shape, dtype=np.int64)\nexpect(node, inputs=[data, new_shape], outputs=[expanded],\n name='test_expand_dim_changed')", + "summary": "dim_changed" + }, + { + "code": "node = onnx.helper.make_node(\n 'Expand',\n inputs=['data', 'new_shape'],\n outputs=['expanded'],\n)\nshape = [3, 1]\nnew_shape = [3, 4]\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[1.], [2.], [3.]]\nexpanded = np.tile(data, 4)\n#print(expanded)\n#[[1., 1., 1., 1.],\n# [2., 2., 2., 2.],\n# [3., 3., 3., 3.]]\nnew_shape = np.array(new_shape, dtype=np.int64)\nexpect(node, inputs=[data, new_shape], outputs=[expanded],\n name='test_expand_dim_unchanged')", + "summary": "dim_unchanged" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + }, + { + "description": "A 1-D tensor indicates the shape you want to expand to, following the broadcast rule", + "name": "shape", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "output", + "type": "T" + } + ], + "since_version": 8, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + 
"tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "EyeLike", + "schema": { + "attributes": [ + { + "description": "(Optional) The data type for the elements of the output tensor. If not specified,the data type of the input tensor T1 is used. If input tensor T1 is also notspecified, then type defaults to 'float'.", + "name": "dtype", + "required": false, + "type": "int64" + }, + { + "description": "(Optional) Index of the diagonal to be populated with ones. Default is 0. If T2 is the output, this op sets T2[i, i+k] = 1. k = 0 populates the main diagonal, k > 0 populates an upper diagonal, and k < 0 populates a lower diagonal.", + "name": "k", + "required": false, + "type": "int64" + } + ], + "description": "Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D\ntensors are supported, i.e. input T1 must be of rank 2. The shape of the output tensor is the\nsame as the input tensor. The data type can be specified by the 'dtype' argument. If\n'dtype' is not specified, then the type of input tensor is used. 
By default, the main diagonal\nis populated with ones, but attribute 'k' can be used to populate upper or lower diagonals.\nThe 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the\nTensorProto message and be valid as an output type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (4, 5)\noff_diagonal_offset = 1\nnode = onnx.helper.make_node(\n 'EyeLike',\n inputs=['x'],\n outputs=['y'],\n k=off_diagonal_offset,\n dtype=onnx.TensorProto.FLOAT,\n)\n\nx = np.random.randint(0, 100, size=shape, dtype=np.int32)\ny = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)\nexpect(node, inputs=[x], outputs=[y], name='test_eyelike_populate_off_main_diagonal')", + "summary": "populate_off_main_diagonal" + }, + { + "code": "shape = (3, 4)\nnode = onnx.helper.make_node(\n 'EyeLike',\n inputs=['x'],\n outputs=['y'],\n dtype=onnx.TensorProto.DOUBLE,\n)\n\nx = np.random.randint(0, 100, size=shape, dtype=np.int32)\ny = np.eye(shape[0], shape[1], dtype=np.float64)\nexpect(node, inputs=[x], outputs=[y], name='test_eyelike_with_dtype')", + "summary": "with_dtype" + }, + { + "code": "shape = (4, 4)\nnode = onnx.helper.make_node(\n 'EyeLike',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.random.randint(0, 100, size=shape, dtype=np.int32)\ny = np.eye(shape[0], shape[1], dtype=np.int32)\nexpect(node, inputs=[x], outputs=[y], name='test_eyelike_without_dtype')", + "summary": "without_dtype" + } + ], + "inputs": [ + { + "description": "2D input tensor to copy shape, and optionally, type information from.", + "name": "input", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor, same shape as input tensor T1.", + "name": "output", + "type": "T2" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + 
"tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ], + "description": "Constrain input types. Strings and complex are not supported.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(bool)" + ], + "description": "Constrain output types. Strings and complex are not supported.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "FeatureVectorizer", + "schema": { + "attributes": [ + { + "description": "The size of each input in the input list", + "name": "inputdimensions", + "required": false, + "type": "int64[]" + } + ], + "description": "Concatenates input tensors into one continuous output.
    \n All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C].\n Inputs are copied to the output maintaining the order of the input arguments.
    \n All inputs must be integers or floats, while the output will be all floating point values.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "An ordered collection of tensors, all with the same element type.", + "name": "X", + "option": "variadic", + "type": "T1" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output array, elements ordered as the inputs.", + "name": "Y", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)", + "tensor(float)", + "tensor(double)" + ], + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Flatten", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). ", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Shape", + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... 
X dn).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_axis' + str(i))", + "summary": "flatten" + }, + { + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_negative_axis' + str(abs(i)))", + "summary": "flatten_negative_axis" + }, + { + "code": "node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b],\n name='test_flatten_default_axis')", + "summary": "flatten_with_default_axis" + } + ], + "inputs": [ + { + "description": "A tensor of rank >= axis.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to 
float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Flatten", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [0, R], where R is the rank of the input tensor. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). ", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Shape", + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_axis' + str(i))", + "summary": "flatten" + }, + { + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_negative_axis' + str(abs(i)))", + "summary": "flatten_negative_axis" + }, + { + "code": "node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b],\n 
name='test_flatten_default_axis')", + "summary": "flatten_with_default_axis" + } + ], + "inputs": [ + { + "description": "A tensor of rank >= axis.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Flatten", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output. The value for axis must be in the range [-r, r], where r is the rank of the input tensor. Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is (1, (d_0 X d_1 ... d_n), where the shape of the input tensor is (d_0, d_1, ... d_n). ", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Shape", + "description": "Flattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... 
X dn).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(len(shape)):\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'],\n axis=i,\n )\n\n new_shape = (1, -1) if i == 0 else (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_axis' + str(i))", + "summary": "flatten" + }, + { + "code": "shape = (2, 3, 4, 5)\na = np.random.random_sample(shape).astype(np.float32)\n\nfor i in range(-len(shape), 0):\n node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'],\n axis=i,\n )\n\n new_shape = (np.prod(shape[0:i]).astype(int), -1)\n b = np.reshape(a, new_shape)\n expect(node, inputs=[a], outputs=[b],\n name='test_flatten_negative_axis' + str(abs(i)))", + "summary": "flatten_negative_axis" + }, + { + "code": "node = onnx.helper.make_node(\n 'Flatten',\n inputs=['a'],\n outputs=['b'], # Default value for axis: axis=1\n)\n\nshape = (5, 4, 3, 2)\na = np.random.random_sample(shape).astype(np.float32)\nnew_shape = (5, 24)\nb = np.reshape(a, new_shape)\nexpect(node, inputs=[a], outputs=[b],\n name='test_flatten_default_axis')", + "summary": "flatten_with_default_axis" + } + ], + "inputs": [ + { + "description": "A tensor of rank >= axis.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "A 2D tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + 
"tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Floor", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Floor takes one input data (Tensor) and produces one output data\n(Tensor) where the floor is, y = floor(x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Floor',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1.5, 1.2, 2]).astype(np.float32)\ny = np.floor(x) # expected output [-2., 1., 2.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_floor_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.floor(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_floor')", + "summary": "floor" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Floor", + "schema": { + "description": "Floor takes one input data (Tensor) and produces one output data\n(Tensor) where the floor is, y = floor(x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Floor',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = 
np.array([-1.5, 1.2, 2]).astype(np.float32)\ny = np.floor(x) # expected output [-2., 1., 2.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_floor_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.floor(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_floor')", + "summary": "floor" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "GRU", + "schema": { + "attributes": [ + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM.", + "name": "activation_alpha", + "required": false, + "type": "float32[]" + }, + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM.", + "name": "activation_beta", + "required": false, + "type": "float32[]" + }, + { + "description": "A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified.", + "name": "activations", + "required": false, + "type": "string[]" + }, + { + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. 
No clip if not specified.", + "name": "clip", + "required": false, + "type": "float32" + }, + { + "default": "foward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.", + "name": "direction", + "required": false, + "type": "string" + }, + { + "description": "Number of neurons in the hidden layer", + "name": "hidden_size", + "required": false, + "type": "int64" + }, + { + "description": "The sequence output for the hidden is optional if 0. Default 0.", + "name": "output_sequence", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "Computes an one-layer GRU. This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x 
if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh) + Wbh) # when linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) Ht-1\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 5\nweight_scale = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\ngru = GRU_Helper(X=input, W=W, R=R)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_gru_defaults')", + "summary": "defaults" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 3\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRU_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R, B], 
outputs=[Y_h.astype(np.float32)], name='test_gru_with_initial_bias')", + "summary": "initial_bias" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],\n [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = np.random.randn(1, number_of_gates * hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nR_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRU_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_gru_seq_length')", + "summary": "seq_length" + } + ], + "inputs": [ + { + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`.", + "name": "W", + "type": "T" + }, + { + "description": "The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`.", + "name": "R", + "type": "T" + }, + { + "description": "The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. 
Optional: If not specified - assumed to be 0", + "name": "B", + "option": "optional", + "type": "T" + }, + { + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.", + "name": "sequence_lens", + "option": "optional", + "type": "T1" + }, + { + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_h", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "3 - 6", + "max_input": 6, + "max_output": 2, + "min_input": 3, + "min_output": 2, + "outputs": [ + { + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. It is optional if `output_sequence` is 0.", + "name": "Y", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_h", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "GRU", + "schema": { + "attributes": [ + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. 
Default values are the same as of corresponding ONNX operators.For example with LeakyRelu, the default alpha is 0.01.", + "name": "activation_alpha", + "required": false, + "type": "float32[]" + }, + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.", + "name": "activation_beta", + "required": false, + "type": "float32[]" + }, + { + "description": "A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified.", + "name": "activations", + "required": false, + "type": "string[]" + }, + { + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified.", + "name": "clip", + "required": false, + "type": "float32" + }, + { + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.", + "name": "direction", + "required": false, + "type": "string" + }, + { + "description": "Number of neurons in the hidden layer", + "name": "hidden_size", + "required": false, + "type": "int64" + }, + { + "description": "When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate.", + "name": "linear_before_reset", + "required": false, + "type": "int64" + }, + { + "description": "The sequence output for the hidden is optional if 0. Default 0.", + "name": "output_sequence", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "Computes an one-layer GRU. 
This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*Rz + Wbz + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*Rr + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*Rh + Rbh + Wbh) # default, when linear_before_reset = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*Rh + Rbh) + Wbh) # when linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) 
Ht-1\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 5\nweight_scale = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\ngru = GRU_Helper(X=input, W=W, R=R)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_gru_defaults')", + "summary": "defaults" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 3\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRU_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_gru_with_initial_bias')", + "summary": "initial_bias" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],\n [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = np.random.randn(1, 
number_of_gates * hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nR_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRU_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_gru_seq_length')", + "summary": "seq_length" + } + ], + "inputs": [ + { + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`.", + "name": "W", + "type": "T" + }, + { + "description": "The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`.", + "name": "R", + "type": "T" + }, + { + "description": "The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If not specified - assumed to be 0", + "name": "B", + "option": "optional", + "type": "T" + }, + { + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.", + "name": "sequence_lens", + "option": "optional", + "type": "T1" + }, + { + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. 
It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_h", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "3 - 6", + "max_input": 6, + "max_output": 2, + "min_input": 3, + "min_output": 0, + "outputs": [ + { + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. It is optional if `output_sequence` is 0.", + "name": "Y", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_h", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "0 - 2", + "since_version": 3, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "GRU", + "schema": { + "attributes": [ + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.For example with LeakyRelu, the default alpha is 0.01.", + "name": "activation_alpha", + "required": false, + "type": "float32[]" + }, + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. 
Default values are the same as of corresponding ONNX operators.", + "name": "activation_beta", + "required": false, + "type": "float32[]" + }, + { + "description": "A list of 2 (or 4 if bidirectional) activation functions for update, reset, and hidden gates. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified.", + "name": "activations", + "required": false, + "type": "string[]" + }, + { + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified.", + "name": "clip", + "required": false, + "type": "float32" + }, + { + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.", + "name": "direction", + "required": false, + "type": "string" + }, + { + "description": "Number of neurons in the hidden layer", + "name": "hidden_size", + "required": false, + "type": "int64" + }, + { + "description": "When computing the output of the hidden gate, apply the linear transformation before multiplying by the output of the reset gate.", + "name": "linear_before_reset", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "Computes an one-layer GRU. 
This operator is usually supported via some custom\nimplementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`z` - update gate\n\n`r` - reset gate\n\n`h` - hidden gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[zrh]` - W parameter weight matrix for update, reset, and hidden gates\n\n`R[zrh]` - R recurrence weight matrix for update, reset, and hidden gates\n\n`Wb[zrh]` - W bias vectors for update, reset, and hidden gates\n\n`Rb[zrh]` - R bias vectors for update, reset, and hidden gates\n\n`WB[zrh]` - W parameter weight matrix for backward update, reset, and hidden gates\n\n`RB[zrh]` - R recurrence weight matrix for backward update, reset, and hidden gates\n\n`WBb[zrh]` - W bias vectors for backward update, reset, and hidden gates\n\n`RBb[zrh]` - R bias vectors for backward update, reset, and hidden gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh):\n\n - zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)\n\n - rt = f(Xt*(Wr^T) + Ht-1*(Rr^T) + Wbr + Rbr)\n\n - ht = g(Xt*(Wh^T) + (rt (.) Ht-1)*(Rh^T) + Rbh + Wbh) # default, when linear_before_reset = 0\n\n - ht = g(Xt*(Wh^T) + (rt (.) (Ht-1*(Rh^T) + Rbh)) + Wbh) # when linear_before_reset != 0\n\n - Ht = (1 - zt) (.) ht + zt (.) Ht-1\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. 
An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 5\nweight_scale = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\ngru = GRU_Helper(X=input, W=W, R=R)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_gru_defaults')", + "summary": "defaults" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 3\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRU_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_gru_with_initial_bias')", + "summary": "initial_bias" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],\n [[10., 11., 12.], [13., 14., 15.], [16., 17., 
18.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\nnumber_of_gates = 3\n\nnode = onnx.helper.make_node(\n 'GRU',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = np.random.randn(1, number_of_gates * hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, number_of_gates * hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nR_B = np.random.randn(1, number_of_gates * hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\ngru = GRU_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = gru.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_gru_seq_length')", + "summary": "seq_length" + } + ], + "inputs": [ + { + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor for the gates. Concatenation of `W[zrh]` and `WB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, input_size]`.", + "name": "W", + "type": "T" + }, + { + "description": "The recurrence weight tensor. Concatenation of `R[zrh]` and `RB[zrh]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 3*hidden_size, hidden_size]`.", + "name": "R", + "type": "T" + }, + { + "description": "The bias tensor for the gates. Concatenation of `[Wb[zrh], Rb[zrh]]` and `[WBb[zrh], RBb[zrh]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 6*hidden_size]`. Optional: If not specified - assumed to be 0", + "name": "B", + "option": "optional", + "type": "T" + }, + { + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. 
It has shape `[batch_size]`.", + "name": "sequence_lens", + "option": "optional", + "type": "T1" + }, + { + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_h", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "3 - 6", + "max_input": 6, + "max_output": 2, + "min_input": 3, + "min_output": 0, + "outputs": [ + { + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. ", + "name": "Y", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_h", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "0 - 2", + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Gather", + "schema": { + "attributes": [ + { + "description": "Which axis to gather on. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1]", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Transform", + "description": "Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather\nentries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates\nthem in an output tensor of rank q + (r - 1).\nExample 1:\n```\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n indices = [\n [0, 1],\n [1, 2],\n ]\n output = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n ]\n```\nExample 2:\n```\n data = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n ]\n indices = [\n [0, 2],\n ]\n axis = 1,\n output = [\n [\n [1.0, 1.9],\n [2.3, 3.9],\n [4.5, 5.9],\n ],\n ]\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Gather',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=0,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=0)\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_0')", + "summary": "gather_0" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gather',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=1,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=1)\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_1')", + "summary": "gather_1" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gather',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=0,\n)\ndata = np.arange(10).astype(np.float32)\nindices = np.array([0, -9, -10])\ny = np.take(data, indices, axis=0)\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_negative_indices')", + "summary": "gather_negative_indices" + } + ], + "inputs": [ + { + "description": "Tensor of rank 
r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds. It is an error if any of the index values are out of bounds.", + "name": "indices", + "type": "Tind" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank q + (r - 1).", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "Gather", + "schema": { + "attributes": [ + { + "description": "Which axis to gather on. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Transform", + "description": "Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather\nentries of the axis dimension of `data` (by default outer-most one as axis=0) indexed by `indices`, and concatenates\nthem in an output tensor of rank q + (r - 1).\n\naxis = 0 :\n\nLet\nk = indices[i_{0}, ..., i_{q-1}]\nThen\noutput[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[k , j_{0}, ..., j_{r-2}]\n\n```\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n indices = [\n [0, 1],\n [1, 2],\n ]\n output = [\n [\n [1.0, 1.2],\n [2.3, 3.4],\n ],\n [\n [2.3, 3.4],\n [4.5, 5.7],\n ],\n ]\n```\naxis = 1 :\n\nLet\nk = indices[i_{0}, ..., i_{q-1}]\nThen\noutput[i_{0}, ..., i_{q-1}, j_{0}, ..., j_{r-2}] = input[j_{0}, k, j_{1}, ..., j_{r-2}]\n\n```\n data = [\n [1.0, 1.2, 1.9],\n [2.3, 3.4, 3.9],\n [4.5, 5.7, 5.9],\n ]\n indices = [\n [0, 2],\n ]\n axis = 1,\n output = [\n [\n [1.0, 1.9],\n [2.3, 3.9],\n [4.5, 5.9],\n ],\n ]\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Gather',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=0,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=0)\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_0')", + "summary": "gather_0" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gather',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=1,\n)\ndata = np.random.randn(5, 4, 3, 2).astype(np.float32)\nindices = np.array([0, 1, 3])\ny = np.take(data, indices, axis=1)\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_1')", + "summary": "gather_1" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gather',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=0,\n)\ndata = 
np.arange(10).astype(np.float32)\nindices = np.array([0, -9, -10])\ny = np.take(data, indices, axis=0)\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_negative_indices')", + "summary": "gather_negative_indices" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of int32/int64 indices, of any rank q. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.", + "name": "indices", + "type": "Tind" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank q + (r - 1).", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "GatherElements", + "schema": { + "attributes": [ + { + "description": "Which axis to gather on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "GatherElements takes two inputs `data` and `indices` of the same rank r >= 1\nand an optional attribute `axis` that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). 
It is an indexing operation\nthat produces its output by indexing into the input data tensor at index\npositions determined by elements of the `indices` tensor.\nIts output shape is the same as the shape of `indices` and consists of one value\n(gathered from the `data`) for each element in `indices`.\n\nFor instance, in the 3-D case (r = 3), the output produced is determined\nby the following equations: \n```\n out[i][j][k] = input[index[i][j][k]][j][k] if axis = 0,\n out[i][j][k] = input[i][index[i][j][k]][k] if axis = 1,\n out[i][j][k] = input[i][j][index[i][j][k]] if axis = 2,\n```\n\nThis operator is also the inverse of ScatterElements. It is similar to Torch's gather operation.\n\nExample 1:\n```\n data = [\n [1, 2],\n [3, 4],\n ]\n indices = [\n [0, 0],\n [1, 0],\n ]\n axis = 1\n output = [\n [\n [1, 1],\n [4, 3],\n ],\n ]\n```\nExample 2:\n```\n data = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n ]\n indices = [\n [1, 2, 0],\n [2, 0, 0],\n ]\n axis = 0\n output = [\n [\n [4, 8, 3],\n [7, 2, 3],\n ],\n ]\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axis = 1\nnode = onnx.helper.make_node(\n 'GatherElements',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=axis,\n)\ndata = np.array([[1, 2],\n [3, 4]], dtype=np.float32)\nindices = np.array([[0, 0],\n [1, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[1, 1],\n# [4, 3]]\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_elements_0')", + "summary": "gather_elements_0" + }, + { + "code": "axis = 0\nnode = onnx.helper.make_node(\n 'GatherElements',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=axis,\n)\ndata = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], dtype=np.float32)\nindices = np.array([[1, 2, 0],\n [2, 0, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[4, 8, 3],\n# [7, 2, 3]]\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n 
name='test_gather_elements_1')", + "summary": "gather_elements_1" + }, + { + "code": "axis = 0\nnode = onnx.helper.make_node(\n 'GatherElements',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=axis,\n)\ndata = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], dtype=np.float32)\nindices = np.array([[-1, -2, 0],\n [-2, 0, 0]], dtype=np.int32)\n\ny = gather_elements(data, indices, axis)\n# print(y) produces\n# [[7, 5, 3],\n# [4, 2, 3]]\n\nexpect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_elements_negative_indices')", + "summary": "gather_elements_negative_indices" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of int32/int64 indices, with the same rank r as the input. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.", + "name": "indices", + "type": "Tind" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of the same shape as indices.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "GatherND", + "schema": { + "description": "Given `data` tensor of rank `r` >= 1, and `indices` tensor of rank `q` 
>= 1, this operator gathers \nslices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1`.\n\n`indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`, \nwhere each element defines a slice of `data`\n\nSome salient points about the inputs' rank and shape:\n \n1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q`\n\n2) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r` (inclusive) \n\n3) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) `-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`.\n It is an error if any of the index values are out of bounds.\n\nThe output is computed as follows:\n\nThe output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`.\n \n1) If `indices_shape[-1] > r` => error condition\n\n2) If `indices_shape[-1] == r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor\n containing 1-D tensors of dimension `r`. Let us think of each such `r` ranked tensor as `indices_slice`. \n Each *scalar value* corresponding to `data[indices_slice]` is filled into the corresponding location of the `(q-1)`-dimensional tensor \n to form the `output` tensor (Example 1 below)\n\n3) If `indices_shape[-1] < r`, since the rank of `indices` is `q`, `indices` can be thought of as a `(q-1)`-dimensional tensor\n containing 1-D tensors of dimension `< r`. Let us think of each such tensors as `indices_slice`. 
\n Each *tensor slice* corresponding to `data[indices_slice , :]` is filled into the corresponding location of the `(q-1)`-dimensional tensor \n to form the `output` tensor (Examples 2, 3, and 4 below)\n\nThis operator is the inverse of `ScatterND`.\n\n`Example 1`\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[0,0],[1,1]] # indices_shape = [2, 2]\n\n output = [0,3] # output_shape = [2]\n\n`Example 2`\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[1],[0]] # indices_shape = [2, 1]\n\n output = [[2,3],[0,1]] # output_shape = [2, 2]\n\n`Example 3`\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[0,1],[1,0]] # indices_shape = [2, 2]\n\n output = [[2,3],[4,5]] # output_shape = [2, 2] \n\n`Example 4`\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]\n\n output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] \n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'GatherND',\n inputs=['data', 'indices'],\n outputs=['output'],\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)\nindices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)\nassert (np.array_equal(output, expected_output))\nexpect(node, inputs=[data, indices], outputs=[output],\n name='test_gathernd_example_float32')", + "summary": "float32" + }, + { + "code": "node = onnx.helper.make_node(\n 'GatherND',\n inputs=['data', 'indices'],\n outputs=['output'],\n)\n\ndata = np.array([[0, 1], [2, 3]], dtype=np.int32)\nindices = np.array([[0, 0], [1, 1]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([0, 3], dtype=np.int32)\nassert (np.array_equal(output, expected_output))\nexpect(node, inputs=[data, indices], outputs=[output],\n name='test_gathernd_example_int32')", + 
"summary": "int32" + }, + { + "code": "node = onnx.helper.make_node(\n 'GatherND',\n inputs=['data', 'indices'],\n outputs=['output'],\n batch_dims=1,\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)\nindices = np.array([[1], [0]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 1)\nexpected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)\nassert (np.array_equal(output, expected_output))\nexpect(node, inputs=[data, indices], outputs=[output],\n name='test_gathernd_example_int32_batch_dim1')", + "summary": "int32_batchdim_1" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.", + "name": "indices", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank q + r - indices_shape[-1] - 1.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "GatherND", + "schema": { + "attributes": [ + { + "description": "The number of batch dimensions. 
The gather of indexing starts from dimension of data[batch_dims:]", + "name": "batch_dims", + "required": false, + "type": "int64" + } + ], + "description": "Given `data` tensor of rank `r` >= 1, `indices` tensor of rank `q` >= 1, and `batch_dims` integer `b`, this operator gathers \nslices of `data` into an output tensor of rank `q + r - indices_shape[-1] - 1 - b`.\n\n`indices` is an q-dimensional integer tensor, best thought of as a `(q-1)`-dimensional tensor of index-tuples into `data`, \nwhere each element defines a slice of `data`\n\n`batch_dims` (denoted as `b`) is an integer indicating the number of batch dimensions, i.e the leading `b` number of dimensions of \n`data` tensor and `indices` are representing the batches, and the gather starts from the `b+1` dimension. \n\nSome salient points about the inputs' rank and shape:\n \n1) r >= 1 and q >= 1 are to be honored. There is no dependency condition to be met between ranks `r` and `q`\n\n2) The first `b` dimensions of the shape of `indices` tensor and `data` tensor must be equal.\n\n3) b < min(q, r) is to be honored.\n\n4) The `indices_shape[-1]` should have a value between 1 (inclusive) and rank `r-b` (inclusive) \n\n5) All values in `indices` are expected to be within bounds [-s, s-1] along axis of size `s` (i.e.) `-data_shape[i] <= indices[...,i] <= data_shape[i] - 1`.\n It is an error if any of the index values are out of bounds.\n\nThe output is computed as follows:\n\nThe output tensor is obtained by mapping each index-tuple in the `indices` tensor to the corresponding slice of the input `data`.\n \n1) If `indices_shape[-1] > r-b` => error condition\n\n2) If `indices_shape[-1] == r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensors\n containing 1-D tensors of dimension `r-b`, where `N` is an integer equals to the product of 1 and all the elements in the batch dimensions \n of the indices_shape. 
Let us think of each such `r-b` ranked tensor as `indices_slice`. Each *scalar value* corresponding to `data[0:b-1,indices_slice]` \n is filled into the corresponding location of the `(q-b-1)`-dimensional tensor to form the `output` tensor (Example 1 below)\n\n3) If `indices_shape[-1] < r-b`, since the rank of `indices` is `q`, `indices` can be thought of as `N` `(q-b-1)`-dimensional tensor\n containing 1-D tensors of dimension `< r-b`. Let us think of each such tensors as `indices_slice`. Each *tensor slice* corresponding \n to `data[0:b-1, indices_slice , :]` is filled into the corresponding location of the `(q-b-1)`-dimensional tensor \n to form the `output` tensor (Examples 2, 3, 4 and 5 below)\n\nThis operator is the inverse of `ScatterND`.\n\n`Example 1`\n\n batch_dims = 0\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[0,0],[1,1]] # indices_shape = [2, 2]\n\n output = [0,3] # output_shape = [2]\n\n`Example 2`\n\n batch_dims = 0\n\n data = [[0,1],[2,3]] # data_shape = [2, 2]\n\n indices = [[1],[0]] # indices_shape = [2, 1]\n\n output = [[2,3],[0,1]] # output_shape = [2, 2]\n\n`Example 3`\n\n batch_dims = 0\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[0,1],[1,0]] # indices_shape = [2, 2]\n\n output = [[2,3],[4,5]] # output_shape = [2, 2] \n\n`Example 4`\n\n batch_dims = 0\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[[0,1]],[[1,0]]] # indices_shape = [2, 1, 2]\n\n output = [[[2,3]],[[4,5]]] # output_shape = [2, 1, 2] \n\n`Example 5`\n\n batch_dims = 1\n\n data = [[[0,1],[2,3]],[[4,5],[6,7]]] # data_shape = [2, 2, 2]\n\n indices = [[1],[0]] # indices_shape = [2, 1]\n\n output = [[2,3],[4,5]] # output_shape = [2, 2] \n\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'GatherND',\n inputs=['data', 'indices'],\n outputs=['output'],\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)\nindices = np.array([[[0, 
1]], [[1, 0]]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)\nassert (np.array_equal(output, expected_output))\nexpect(node, inputs=[data, indices], outputs=[output],\n name='test_gathernd_example_float32')", + "summary": "float32" + }, + { + "code": "node = onnx.helper.make_node(\n 'GatherND',\n inputs=['data', 'indices'],\n outputs=['output'],\n)\n\ndata = np.array([[0, 1], [2, 3]], dtype=np.int32)\nindices = np.array([[0, 0], [1, 1]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 0)\nexpected_output = np.array([0, 3], dtype=np.int32)\nassert (np.array_equal(output, expected_output))\nexpect(node, inputs=[data, indices], outputs=[output],\n name='test_gathernd_example_int32')", + "summary": "int32" + }, + { + "code": "node = onnx.helper.make_node(\n 'GatherND',\n inputs=['data', 'indices'],\n outputs=['output'],\n batch_dims=1,\n)\n\ndata = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)\nindices = np.array([[1], [0]], dtype=np.int64)\noutput = gather_nd_impl(data, indices, 1)\nexpected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)\nassert (np.array_equal(output, expected_output))\nexpect(node, inputs=[data, indices], outputs=[output],\n name='test_gathernd_example_int32_batch_dim1')", + "summary": "int32_batchdim_1" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of rank q >= 1. All index values are expected to be within bounds [-s, s-1] along axis of size s. 
It is an error if any of the index values are out of bounds.", + "name": "indices", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank q + r - indices_shape[-1] - 1.", + "name": "output", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Gemm", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B, the default value is 1.0.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 1.0, + "description": "Scalar multiplier for input tensor C, the default value is 1.0.", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "description": "Whether C should be broadcasted", + "name": "broadcast", + "required": false, + "type": "int64" + }, + { + "description": "Whether A should be transposed", + "name": "transA", + "required": false, + "type": "int64" + }, + { + "description": "Whether B should be transposed", + "name": "transB", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\nCompute Y = alpha * A * B + beta * C, where input tensor A has\ndimension (M X K), input tensor B has dimension (K X N), input tensor C and\noutput tensor Y have dimension (M X N).\nIf attribute broadcast is non-zero, input tensor 
C will be broadcasted to match\nthe dimension requirement. A will be transposed before doing the computation\nif attribute transA is non-zero, same for B and transB.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_all_attributes')", + "summary": "all_attributes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_alpha')", + "summary": "alpha" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_beta')", + "summary": "beta" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_matrix_bias')", + "summary": "default_matrix_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 
'b'],\n outputs=['y']\n)\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y],\n name='test_gemm_default_no_bias')", + "summary": "default_no_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_scalar_bias')", + "summary": "default_scalar_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_single_elem_vector_bias')", + "summary": "default_single_elem_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_vector_bias')", + "summary": "default_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_zero_bias')", + "summary": "default_zero_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 
'b', 'c'],\n outputs=['y'],\n transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeA')", + "summary": "transposeA" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeB')", + "summary": "transposeB" + } + ], + "inputs": [ + { + "description": "Input tensor A", + "name": "A", + "type": "T" + }, + { + "description": "Input tensor B", + "name": "B", + "type": "T" + }, + { + "description": "Input tensor C, can be inplace.", + "name": "C", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Gemm", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B, the default value is 1.0.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 1.0, + "description": "Scalar multiplier for input tensor C, the default value is 1.0.", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "description": "Whether C should be broadcasted", + "name": "broadcast", + "required": false, + "type": "int64" + }, + { + 
"description": "Whether A should be transposed", + "name": "transA", + "required": false, + "type": "int64" + }, + { + "description": "Whether B should be transposed", + "name": "transB", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\nCompute Y = alpha * A * B + beta * C, where input tensor A has\ndimension (M X K), input tensor B has dimension (K X N), input tensor C and\noutput tensor Y have dimension (M X N).\nIf attribute broadcast is non-zero, input tensor C will be broadcasted to match\nthe dimension requirement. A will be transposed before doing the computation\nif attribute transA is non-zero, same for B and transB.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_all_attributes')", + "summary": "all_attributes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_alpha')", + "summary": "alpha" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = 
gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_beta')", + "summary": "beta" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_matrix_bias')", + "summary": "default_matrix_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b'],\n outputs=['y']\n)\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y],\n name='test_gemm_default_no_bias')", + "summary": "default_no_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_scalar_bias')", + "summary": "default_scalar_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_single_elem_vector_bias')", + "summary": "default_single_elem_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = 
gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_vector_bias')", + "summary": "default_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_zero_bias')", + "summary": "default_zero_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeA')", + "summary": "transposeA" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeB')", + "summary": "transposeB" + } + ], + "inputs": [ + { + "description": "Input tensor A", + "name": "A", + "type": "T" + }, + { + "description": "Input tensor B", + "name": "B", + "type": "T" + }, + { + "description": "Input tensor C", + "name": "C", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input 
and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Gemm", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 1.0, + "description": "Scalar multiplier for input tensor C.", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "description": "Whether A should be transposed", + "name": "transA", + "required": false, + "type": "int64" + }, + { + "description": "Whether B should be transposed", + "name": "transB", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\nA' = transpose(A) if transA else A\n\nB' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). 
A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_all_attributes')", + "summary": "all_attributes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_alpha')", + "summary": "alpha" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_beta')", + "summary": "beta" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], 
outputs=[y],\n name='test_gemm_default_matrix_bias')", + "summary": "default_matrix_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b'],\n outputs=['y']\n)\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y],\n name='test_gemm_default_no_bias')", + "summary": "default_no_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_scalar_bias')", + "summary": "default_scalar_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_single_elem_vector_bias')", + "summary": "default_single_elem_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_vector_bias')", + "summary": "default_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, 
c], outputs=[y],\n name='test_gemm_default_zero_bias')", + "summary": "default_zero_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeA')", + "summary": "transposeA" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeB')", + "summary": "transposeB" + } + ], + "inputs": [ + { + "description": "Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.", + "name": "A", + "type": "T" + }, + { + "description": "Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.", + "name": "B", + "type": "T" + }, + { + "description": "Input tensor C. 
The shape of C should be unidirectional broadcastable to (M, N).", + "name": "C", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of shape (M, N).", + "name": "Y", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Gemm", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 1.0, + "description": "Scalar multiplier for input tensor C.", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "description": "Whether A should be transposed", + "name": "transA", + "required": false, + "type": "int64" + }, + { + "description": "Whether B should be transposed", + "name": "transB", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\nA' = transpose(A) if transA else A\n\nB' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). 
A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_all_attributes')", + "summary": "all_attributes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_alpha')", + "summary": "alpha" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_beta')", + "summary": "beta" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], 
outputs=[y],\n name='test_gemm_default_matrix_bias')", + "summary": "default_matrix_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b'],\n outputs=['y']\n)\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y],\n name='test_gemm_default_no_bias')", + "summary": "default_no_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_scalar_bias')", + "summary": "default_scalar_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_single_elem_vector_bias')", + "summary": "default_single_elem_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_vector_bias')", + "summary": "default_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, 
c], outputs=[y],\n name='test_gemm_default_zero_bias')", + "summary": "default_zero_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeA')", + "summary": "transposeA" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeB')", + "summary": "transposeB" + } + ], + "inputs": [ + { + "description": "Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.", + "name": "A", + "type": "T" + }, + { + "description": "Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.", + "name": "B", + "type": "T" + }, + { + "description": "Input tensor C. 
The shape of C should be unidirectional broadcastable to (M, N).", + "name": "C", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of shape (M, N).", + "name": "Y", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Gemm", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Scalar multiplier for the product of input tensors A * B.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 1.0, + "description": "Scalar multiplier for input tensor C.", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "description": "Whether A should be transposed", + "name": "transA", + "required": false, + "type": "int64" + }, + { + "description": "Whether B should be transposed", + "name": "transB", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "General Matrix multiplication:\nhttps://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms#Level_3\n\nA' = transpose(A) if transA else A\n\nB' = transpose(B) if transB else B\n\nCompute Y = alpha * A' * B' + beta * C, where input tensor A has shape (M, K) or (K, M),\ninput tensor B has shape (K, N) or (N, K), input tensor C is broadcastable to shape (M, N),\nand output tensor Y has shape (M, N). 
A will be transposed before doing the\ncomputation if attribute transA is non-zero, same for B and transB.\nThis operator supports **unidirectional broadcasting** (tensor C should be unidirectional broadcastable to tensor A * B); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.25,\n beta=0.35,\n transA=1,\n transB=1\n)\na = np.random.ranf([4, 3]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.random.ranf([1, 5]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_all_attributes')", + "summary": "all_attributes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n alpha=0.5\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, alpha=0.5)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_alpha')", + "summary": "alpha" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n beta=0.5\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, beta=0.5)\nexpect(node, inputs=[a, b, 
c], outputs=[y],\n name='test_gemm_beta')", + "summary": "beta" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.random.ranf([3, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_matrix_bias')", + "summary": "default_matrix_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b'],\n outputs=['y']\n)\na = np.random.ranf([2, 10]).astype(np.float32)\nb = np.random.ranf([10, 3]).astype(np.float32)\ny = gemm_reference_implementation(a, b)\nexpect(node, inputs=[a, b], outputs=[y],\n name='test_gemm_default_no_bias')", + "summary": "default_no_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 3]).astype(np.float32)\nb = np.random.ranf([3, 4]).astype(np.float32)\nc = np.array(3.14).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_scalar_bias')", + "summary": "default_scalar_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 7]).astype(np.float32)\nb = np.random.ranf([7, 3]).astype(np.float32)\nc = np.random.ranf([1]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_single_elem_vector_bias')", + "summary": "default_single_elem_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([2, 7]).astype(np.float32)\nb = np.random.ranf([7, 4]).astype(np.float32)\nc = np.random.ranf([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n 
name='test_gemm_default_vector_bias')", + "summary": "default_vector_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y']\n)\na = np.random.ranf([3, 5]).astype(np.float32)\nb = np.random.ranf([5, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_default_zero_bias')", + "summary": "default_zero_bias" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transA=1\n)\na = np.random.ranf([6, 3]).astype(np.float32)\nb = np.random.ranf([6, 4]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transA=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeA')", + "summary": "transposeA" + }, + { + "code": "node = onnx.helper.make_node(\n 'Gemm',\n inputs=['a', 'b', 'c'],\n outputs=['y'],\n transB=1\n)\na = np.random.ranf([3, 6]).astype(np.float32)\nb = np.random.ranf([4, 6]).astype(np.float32)\nc = np.zeros([1, 4]).astype(np.float32)\ny = gemm_reference_implementation(a, b, c, transB=1)\nexpect(node, inputs=[a, b, c], outputs=[y],\n name='test_gemm_transposeB')", + "summary": "transposeB" + } + ], + "inputs": [ + { + "description": "Input tensor A. The shape of A should be (M, K) if transA is 0, or (K, M) if transA is non-zero.", + "name": "A", + "type": "T" + }, + { + "description": "Input tensor B. The shape of B should be (K, N) if transB is 0, or (N, K) if transB is non-zero.", + "name": "B", + "type": "T" + }, + { + "description": "Optional input tensor C. If not specified, the computation is done as if C is a scalar 0. 
The shape of C should be unidirectional broadcastable to (M, N).", + "name": "C", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of shape (M, N).", + "name": "Y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "GlobalAveragePool", + "schema": { + "category": "Pool", + "description": "GlobalAveragePool consumes an input tensor X and applies average pooling across\n the values in the same channel. This is equivalent to AveragePool with kernel size\n equal to the spatial dimension of input tensor.", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'GlobalAveragePool',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(1, 3, 5, 5).astype(np.float32)\nspatial_shape = np.ndim(x) - 2\ny = np.average(x, axis=tuple(range(spatial_shape, spatial_shape + 2)))\nfor _ in range(spatial_shape):\n y = np.expand_dims(y, -1)\nexpect(node, inputs=[x], outputs=[y], name='test_globalaveragepool')", + "summary": "globalaveragepool" + }, + { + "code": "\nnode = onnx.helper.make_node(\n 'GlobalAveragePool',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.array([[[\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n]]]).astype(np.float32)\ny = np.array([[[[5]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name='test_globalaveragepool_precomputed')", + "summary": "globalaveragepool_precomputed" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch 
size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "GlobalLpPool", + "schema": { + "attributes": [ + { + "default": 2.0, + "description": "p value of the Lp norm used to pool over the input data, default is 2.0.", + "name": "p", + "required": false, + "type": "float32" + } + ], + "category": "Pool", + "description": "GlobalLpPool consumes an input tensor X and applies lp pool pooling across the\n the values in the same channel. This is equivalent to LpPool with kernel size\n equal to the spatial dimension of input tensor.", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from pooling across the input tensor. 
Dimensions will be N x C x 1 x 1", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "GlobalLpPool", + "schema": { + "attributes": [ + { + "default": 2, + "description": "p value of the Lp norm used to pool over the input data.", + "name": "p", + "required": false, + "type": "int64" + } + ], + "category": "Pool", + "description": "GlobalLpPool consumes an input tensor X and applies lp pool pooling across\n the values in the same channel. This is equivalent to LpPool with kernel size\n equal to the spatial dimension of input tensor.", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. 
The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.", + "name": "Y", + "type": "T" + } + ], + "since_version": 2, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "GlobalMaxPool", + "schema": { + "category": "Pool", + "description": "GlobalMaxPool consumes an input tensor X and applies max pooling across\n the values in the same channel. This is equivalent to MaxPool with kernel size\n equal to the spatial dimension of input tensor.", + "domain": "ai.onnx", + "examples": [ + { + "code": "\nnode = onnx.helper.make_node(\n 'GlobalMaxPool',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(1, 3, 5, 5).astype(np.float32)\nspatial_shape = np.ndim(x) - 2\ny = np.max(x, axis=tuple(range(spatial_shape, spatial_shape + 2)))\nfor _ in range(spatial_shape):\n y = np.expand_dims(y, -1)\nexpect(node, inputs=[x], outputs=[y], name='test_globalmaxpool')", + "summary": "globalmaxpool" + }, + { + "code": "\nnode = onnx.helper.make_node(\n 'GlobalMaxPool',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.array([[[\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n]]]).astype(np.float32)\ny = np.array([[[[9]]]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y], name='test_globalmaxpool_precomputed')", + "summary": "globalmaxpool_precomputed" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... 
Dn), where N is the batch size.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input. The first two dimensions of output shape are the same as the input (N x C), while the other dimensions are all 1.", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Gradient", + "schema": { + "attributes": [ + { + "description": "Input tensor names of the differentiated sub-graph. It contains only the necessary differentiated inputs of a (sub-)graph. Variables (usually called intermediate variables) that can be generated from inputs cannot be included in this attribute.", + "name": "xs", + "required": true, + "type": "string[]" + }, + { + "description": "The targeted tensor. It can be viewed as the output of the differentiated function. The attribute \"xs\" and attribute \"zs\" are the minimal independent variable set that determines the value of \"y\".", + "name": "y", + "required": true, + "type": "string" + }, + { + "description": "Input tensor names of the differentiated sub-graph. It contains only the necessary non-differentiated inputs of a (sub-)graph. Variables (usually called intermediate variables) that can be generated from inputs cannot be included in this attribute.", + "name": "zs", + "required": false, + "type": "string[]" + } + ], + "description": "Gradient operator computes the partial derivatives of a specific tensor w.r.t.\nsome other tensors. This operator is widely used in gradient-based training\nalgorithms. 
To illustrate its use, let's consider a computation graph,\n\n```\nX -----.\n |\n v\nW --> Conv --> H --> Gemm --> Y\n ^\n |\n Z\n```\n\n, where W and Z are trainable tensors. Note that operators' attributes are\nomitted for the sake of simplicity. Let dY/dW (dY/dZ) be the gradient of\nY with respect to W (Z). The user can compute gradient by inserting Gradient\noperator to form another graph shown below.\n\n```\nW --> Conv --> H --> Gemm --> Y\n| ^ ^\n| | |\n| X Z\n| | |\n| | .----------'\n| | | (W/Z/X is the 1st/2nd/3rd input of Gradient as shown in\n| | | \"xs\" followed by \"zs\")\n| v v\n'---> Gradient(xs=[\"W\", \"Z\"], zs=[\"X\"], y=\"Y\")\n | |\n | '-----------------------------------> dY/dW (1st output of Gradient)\n |\n '---------------------------------------> dY/dZ (2nd output of Gradient)\n```\n\nBy definition, the tensor \"y\" is a function of independent variables in \"xs\"\nand \"zs\". Since we only compute the gradient of \"y\" w.r.t. the differentiable\nvariables in \"xs\", this Gradient only outputs dY/dW and dY/dZ. Note that \"H\"\ncannot appear in \"xs\" and \"zs\". The reason is that \"H\" can be determined by\ntensors \"W\" and \"X\" and therefore \"H\" is not an independent variable.\n\nAll outputs are optional. If needed, for example, user can assign an empty\nstring to the 1st output name of that Gradient to skip the generation of dY/dW.\nNote that the concept of optional outputs can also be found in ONNX's RNN, GRU,\nand LSTM.\n\nGradient operator can compute derivative against intermediate tensors. 
For\nexample, the gradient of Y with respect to H can be done via\n\n```\nW --> Conv --> H --> Gemm --> Y\n ^ | ^\n | | |\n X | Z\n .-------' |\n | .----------'\n | | (H/Z is the 1st/2nd input of Gradient as shown in \"xs\")\n v v\n Gradient(xs=[\"H\", \"Z\"], y=\"Y\")\n | |\n | '-----------------------------------> dY/dH (1st output of Gradient)\n |\n '---------------------------------------> dY/dZ (2nd output of Gradient)\n```\n\nIt is possible to represent high-order differentiation using Gradient operators.\nFor example, given the following linear model:\n\n```\nW --> Gemm --> Y --> Loss --> O\n ^ ^\n | |\n X L\n```\n\nTo compute the 2nd order derivative of O with respect to W (denoted by\nd^2O/dW^2), one can do\n\n```\nW --> Gemm --> Y --> Loss --> O\n| ^ ^\n| | |\n| X .------------L\n| | | |\n| | | v\n+------+-+> Gradient(xs=[\"X\", \"W\"], zs=[\"L\"], y=\"O\") ---> dO/dX (1st output of Gradient)\n| | | |\n| | | '---> dO/dW (2nd output of Gradient)\n| v v\n'---> Gradient(xs=[\"X\", \"W\"], zs=[\"L\"], y=\"dO/dW\") ---> d(dO/dW)dX (1st output of\n | Gradient)\n |\n |\n '---> d^2O/dW^2 (2nd output of Gradient)\n```\n\nThe tensors named in attributes \"xs\", \"zs\", and \"y\" define the differentiated\ncomputation graph, and the inputs to Gradient node define the values at\nwhich the gradient is computed. We can feed different tensors to the identified\ngraph. For example, one can compute the gradient of Y with respect to H at \na specific value of H, H_1, by providing that value as an input to the Gradient\nnode.\n\n```\nW --> Conv --> H --> Gemm --> Y\n ^ ^\n | |\n X Z\n\n Z_1 (2nd input of Gradient)\n |\n v\nH_1 --> Gradient(xs=[\"H\", \"Z\"], y=\"Y\") ---> dY/dH when H = H_1 and Y = Y_1.\n |\n '------------------------------> dY/dZ (2nd output of Gradient)\n```\n\nWhen the inputs of Gradient are the tensors named in \"xs\" and \"zs\", the\ncomputation can be optimized. 
More specifically, intermediate variables in\nforward pass can be reused if the gradient is computed via reverse-mode\nauto-differentiation.\n\n", + "domain": "ai.onnx.preview.training", + "examples": [ + { + "code": "add_node = onnx.helper.make_node('Add',\n ['a', 'b'], ['c'], name='my_add')\ngradient_node = onnx.helper.make_node(\n 'Gradient', ['a', 'b'],\n ['dc_da', 'dc_db'], name='my_gradient',\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n xs=['a', 'b'], y='c')\n\na = np.array(1.0).astype(np.float32)\nb = np.array(2.0).astype(np.float32)\nc = a + b\n# dc / da = d(a+b) / da = 1\ndc_da = np.array(1).astype(np.float32)\n# db / db = d(a+b) / db = 1\ndc_db = np.array(1).astype(np.float32)\n\ngraph = onnx.helper.make_graph(\n nodes=[add_node, gradient_node],\n name='GradientOfAdd',\n inputs=[\n onnx.helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,\n []),\n onnx.helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,\n [])],\n outputs=[\n onnx.helper.make_tensor_value_info('c', onnx.TensorProto.FLOAT,\n []),\n onnx.helper.make_tensor_value_info('dc_da',\n onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info('dc_db',\n onnx.TensorProto.FLOAT, [])])\nopsets = [\n onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),\n onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)]\nmodel = onnx.helper.make_model(\n graph,\n producer_name='backend-test',\n opset_imports=opsets)\nexpect(model, inputs=[a, b], outputs=[c, dc_da, dc_db],\n name='test_gradient_of_add')", + "summary": "gradient_scalar_add" + }, + { + "code": "add_node = onnx.helper.make_node('Add',\n ['a', 'b'], ['c'], name='my_add')\nmul_node = onnx.helper.make_node('Mul',\n ['c', 'a'], ['d'], name='my_mul')\ngradient_node = onnx.helper.make_node(\n 'Gradient', ['a', 'b'],\n ['dd_da', 'dd_db'], name='my_gradient',\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,\n xs=['a', 'b'], y='d')\n\na = np.array(1.0).astype(np.float32)\nb = np.array(2.0).astype(np.float32)\nc = a + b\n# d = a * c = a * 
(a + b)\nd = a * c\n# dd / da = d(a*a+a*b) / da = 2 * a + b\ndd_da = (2 * a + b).astype(np.float32)\n# dd / db = d(a*a+a*b) / db = a\ndd_db = a\n\ngraph = onnx.helper.make_graph(\n nodes=[add_node, mul_node, gradient_node],\n name='GradientOfTwoOperators',\n inputs=[\n onnx.helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,\n []),\n onnx.helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,\n [])],\n outputs=[\n onnx.helper.make_tensor_value_info('d', onnx.TensorProto.FLOAT,\n []),\n onnx.helper.make_tensor_value_info('dd_da',\n onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info('dd_db',\n onnx.TensorProto.FLOAT, [])])\n\nopsets = [\n onnx.helper.make_operatorsetid(ONNX_DOMAIN, 12),\n onnx.helper.make_operatorsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)]\nmodel = onnx.helper.make_model(graph,\n producer_name='backend-test',\n opset_imports=opsets)\nexpect(model, inputs=[a, b], outputs=[d, dd_da, dd_db],\n name='test_gradient_of_add_and_mul')", + "summary": "gradient_scalar_add_and_mul" + } + ], + "inputs": [ + { + "description": "The values fed into graph identified by the attributes. The i-th input is the value of the i-th tensor specified in the concatenated list of the attribute \"xs\" and the attribute \"zs\". For example, if xs=[\"A\", \"B\"] and zs=[\"C\"], the first input is used as the value of symbol \"A\" and the 3rd input is substituted for all the occurrences of \"C\".", + "name": "Inputs", + "option": "variadic", + "type": "T1" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The gradient of the tensor specified by the attribute \"y\" with respect to each of tensors specified in the attribute \"xs\". 
The i-th output is the gradient of \"y\" with respect to the i-th tensor specified in the attribute \"xs\".", + "name": "Outputs", + "option": "variadic", + "type": "T2" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Allow outputs to be any kind of tensor.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Allow inputs to be any kind of floating-point tensor.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "GraphCall", + "schema": { + "attributes": [ + { + "description": "The invoked graph's name. The only allowed value is the name of the inference graph, which is stored in \"ModelProto.graph.name\" in the ONNX model format.", + "name": "graph_name", + "required": true, + "type": "string" + } + ], + "description": "The GraphCall operator invokes a graph inside TrainingInfoProto's\nalgorithm field. The GraphCall inputs and outputs are bound to those of\ninvoked graph by position. If a graph input has an initializer, that input\nis considered optional. 
All graph outputs are optional.\n\nBelow Python syntax is used for describing dictionary and list.\n\nAssume that ModelProto's graph field has\n- name: \"MyInferenceGraph\"\n- input: [\"X\", \"W\", \"Z\"]\n- initializer: [W]\n- output: [\"Y\"]\n\nas visualized below for inference.\n\n```\nX -----.\n |\n v\nW --> Conv --> H --> Gemm --> Y\n ^\n |\n Z\n```\n\nAssume that the training algorithm contains\n\n- inputs: [\"X_1\", \"Z_1\", \"C\"]\n- initializer: [T]\n- outputs: [\"W_new\"]\n\nwith a dictionary\n\n- update_binding: {\"W\": \"W_new\", \"T\": \"T_new\"}\n\nInside the training algorithm graph, one can invoke the inference\ngraph via adding a GraphCall node with\n\n- inputs: [\"X_1\", \"W\", Z_1\"]\n- outputs: [\"Y_1\"]\n- an attribute graph_name=\"MyInferenceGraph\",\n\nThe initializers, \"W\" and \"T\" in this case, in update_binding\nare considered globally-visible and mutable variables, which\ncan be used as inputs of operators in the training graph.\n\nAn example training algorithm graph may look like\n\n```\n.-------- W (a global and mutable variable from\n| | the inference graph)\n| |\n| .-----'-----------.\n| | |\n| | v\n| | .-- X_1 --> GraphCall(graph_name=\"MyInferenceGraph\")\n| | | | |\n| | | | |\n| | | Z_1 -----' |\n| | | | V\n| | | | Y_1 ---> Loss ---> O\n| | | | ^\n| | | | |\n| | `--. 
| C\n| | | | |\n| | | | .----------------'\n| | | | |\n| | v v v\n| `--> Gradient(xs=[\"W\"], zs=[\"X_1\", \"Z_1\", \"C\"], y=\"O\")\n| |\n| v\n| dO_dW (gradient of W) 1 (a scalar one)\n| | |\n| V v\n| Div <--- T ------------> Add ---> T_new\n| | (T is the number of training iterations.\n| | T is also globally visible and mutable.)\n| v\n`-----> Sub ----> W_new\n```\n\nwhere Loss is a dummy node which computes the minimized objective function.\n\nThe variable \"W\" is an optional input in the called graph.\nIf the user omits it, the input list of GraphCall becomes [\"X_1\", \"\", \"Z_1\"].\nIn this case, from the view of computation graph, the Conv operator invoked by\nGraphCall's may be still connected the global \"W\" variable and therefore the\nstructure of the computation graph is unchanged.\n", + "domain": "ai.onnx.preview.training", + "inputs": [ + { + "description": "Inputs fed to the invoked graph. The i-th input here goes to the i-th input of the invoked graph. To omit an optional input in this field, the user can drop it or use an empty string.", + "name": "Inputs", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The outputs generated by the called graph. Its i-th value is bound to the i-th output of the called graph. 
Similar to the inputs, all outputs are optional.", + "name": "Outputs", + "option": "variadic", + "type": "T" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Allow inputs and outputs to be any kind of tensor.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Greater", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "category": "Logic", + "description": "Returns the tensor resulted from performing the `greater` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. 
See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Greater',\n inputs=['x', 'y'],\n outputs=['greater'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater')", + "summary": "greater" + }, + { + "code": "node = onnx.helper.make_node(\n 'GreaterOrEqual',\n inputs=['x', 'y'],\n outputs=['greater_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_equal')", + "summary": "greater" + }, + { + "code": "node = onnx.helper.make_node(\n 'Greater',\n inputs=['x', 'y'],\n outputs=['greater'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_bcast')", + "summary": "greater_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'GreaterOrEqual',\n inputs=['x', 'y'],\n outputs=['greater_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_equal_bcast')", + "summary": "greater_broadcast" + } + ], + "inputs": [ + { + "description": "Left input tensor for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Right input tensor for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + 
"tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Greater", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `greater` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Greater',\n inputs=['x', 'y'],\n outputs=['greater'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater')", + "summary": "greater" + }, + { + "code": "node = onnx.helper.make_node(\n 'GreaterOrEqual',\n inputs=['x', 'y'],\n outputs=['greater_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_equal')", + "summary": "greater" + }, + { + "code": "node = onnx.helper.make_node(\n 'Greater',\n inputs=['x', 'y'],\n outputs=['greater'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_bcast')", + "summary": "greater_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'GreaterOrEqual',\n inputs=['x', 'y'],\n outputs=['greater_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, 
y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_equal_bcast')", + "summary": "greater_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Greater", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `greater` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Greater',\n inputs=['x', 'y'],\n outputs=['greater'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater')", + "summary": "greater" + }, + { + "code": "node = onnx.helper.make_node(\n 'GreaterOrEqual',\n inputs=['x', 'y'],\n outputs=['greater_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], 
outputs=[z],\n name='test_greater_equal')", + "summary": "greater" + }, + { + "code": "node = onnx.helper.make_node(\n 'Greater',\n inputs=['x', 'y'],\n outputs=['greater'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_bcast')", + "summary": "greater_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'GreaterOrEqual',\n inputs=['x', 'y'],\n outputs=['greater_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.greater_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_greater_equal_bcast')", + "summary": "greater_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input types to all numeric tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "GreaterOrEqual", + "schema": { + "description": "Returns the tensor resulted from performing the `greater_equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports 
**multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input types to all numeric tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "HardSigmoid", + "schema": { + "attributes": [ + { + "default": 0.20000000298023224, + "description": "Value of alpha default to 0.2", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 0.5, + "description": "Value of beta default to 0.5", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "category": "Activation", + "description": "HardSigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)),\nis applied to the tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'HardSigmoid',\n inputs=['x'],\n 
outputs=['y'],\n alpha=0.5,\n beta=0.6\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1) # expected output [0.1, 0.6, 1.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardsigmoid_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardsigmoid')", + "summary": "hardsigmoid" + }, + { + "code": "default_alpha = 0.2\ndefault_beta = 0.5\nnode = onnx.helper.make_node(\n 'HardSigmoid',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * default_alpha + default_beta, 0, 1)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardsigmoid_default')", + "summary": "hardsigmoid_default" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "HardSigmoid", + "schema": { + "attributes": [ + { + "default": 0.20000000298023224, + "description": "Value of alpha.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 0.5, + "description": "Value of beta.", + "name": "beta", + "required": false, + "type": "float32" + } + ], + "category": "Activation", + "description": "HardSigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the HardSigmoid function, y = max(0, min(1, alpha * x + beta)),\nis applied to the tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'HardSigmoid',\n inputs=['x'],\n 
outputs=['y'],\n alpha=0.5,\n beta=0.6\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1) # expected output [0.1, 0.6, 1.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardsigmoid_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * 0.5 + 0.6, 0, 1)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardsigmoid')", + "summary": "hardsigmoid" + }, + { + "code": "default_alpha = 0.2\ndefault_beta = 0.5\nnode = onnx.helper.make_node(\n 'HardSigmoid',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x * default_alpha + default_beta, 0, 1)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardsigmoid_default')", + "summary": "hardsigmoid_default" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Hardmax", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). 
The output tensor has the same shape\nand contains the hardmax values of the corresponding input.\n\nInput does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(np.float32)\ny = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_example')\n\n# For multiple occurrences of the maximal values, the first occurrence is selected for one-hot output\nx = np.array([[3, 3, 3, 1]]).astype(np.float32)\ny = np.array([[1, 0, 0, 0]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_one_hot')", + "summary": "hardmax" + }, + { + "code": "def hardmax_2d(x): # type: (np.ndarray) -> np.ndarray\n return np.eye(x.shape[1], dtype=x.dtype)[np.argmax(x, axis=1)]\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n axis=0,\n)\ny = hardmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_axis_0')\n\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n axis=1,\n)\ny = hardmax_2d(x.reshape(3, 20)).reshape(3, 4, 
5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_axis_1')\n\n# default axis is 1\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_default_axis')\n\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n axis=2,\n)\ny = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_axis_2')\n\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n axis=-1,\n)\ny = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_negative_axis')", + "summary": "hardmax_axis" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output values with the same shape as input tensor (the original size without coercion).", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Hardmax", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(input).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "The operator computes the hardmax (1 for the first maximum value, and 0 for all others) values for each layer in the batch\n of the given input.\n\nThe input does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors. The output tensor has the same shape\nand contains the hardmax values of the corresponding input.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(np.float32)\ny = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_example')\n\n# For multiple occurrences of the maximal values, the first occurrence is selected for one-hot output\nx = np.array([[3, 3, 3, 1]]).astype(np.float32)\ny = np.array([[1, 0, 0, 0]]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_one_hot')", + "summary": "hardmax" + }, + { + "code": "def hardmax_2d(x): # type: (np.ndarray) -> np.ndarray\n return np.eye(x.shape[1], dtype=x.dtype)[np.argmax(x, axis=1)]\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n 
outputs=['y'],\n axis=0,\n)\ny = hardmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_axis_0')\n\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n axis=1,\n)\ny = hardmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_axis_1')\n\n# default axis is 1\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_default_axis')\n\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n axis=2,\n)\ny = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_axis_2')\n\nnode = onnx.helper.make_node(\n 'Hardmax',\n inputs=['x'],\n outputs=['y'],\n axis=-1,\n)\ny = hardmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_hardmax_negative_axis')", + "summary": "hardmax_axis" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output values with the same shape as input tensor (the original size without coercion).", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Identity", + "schema": { + "description": "Identity operator", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Identity',\n inputs=['x'],\n outputs=['y'],\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nexpect(node, 
inputs=[data], outputs=[data],\n name='test_identity')", + "summary": "identity" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Tensor to copy input into.", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "If", + "schema": { + "attributes": [ + { + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch.", + "name": "else_branch", + "required": true, + "type": "graph" + }, + { + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch.", + "name": "then_branch", + "required": true, + "type": "graph" + } + ], + "description": "If conditional", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Condition for the if", + "name": "cond", + "type": "B" + } + ], + "max_input": 1, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Values that are live-out to the enclosing scope. 
The return values in the `then_branch` and `else_branch` must be of the same shape and same data type.", + "name": "outputs", + "option": "variadic", + "type": "V" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "All Tensor types", + "type_param_str": "V" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Only bool", + "type_param_str": "B" + } + ] + } + }, + { + "name": "If", + "schema": { + "attributes": [ + { + "description": "Graph to run if condition is false. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the then_branch.", + "name": "else_branch", + "required": true, + "type": "graph" + }, + { + "description": "Graph to run if condition is true. Has N outputs: values you wish to be live-out to the enclosing scope. The number of outputs must match the number of outputs in the else_branch.", + "name": "then_branch", + "required": true, + "type": "graph" + } + ], + "description": "If conditional", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Condition for the if", + "name": "cond", + "type": "B" + } + ], + "max_input": 1, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Values that are live-out to the enclosing scope. The return values in the `then_branch` and `else_branch` must be of the same data type. The `then_branch` and `else_branch` may produce tensors with the same element type and different shapes. 
If corresponding outputs from the then-branch and the else-branch have static shapes S1 and S2, then the shape of the corresponding output variable of the if-node (if present) must be compatible with both S1 and S2 as it represents the union of both possible shapes.For example, if in a model file, the the first output of `then_branch` is typed float tensor with shape [2] and the first output of `else_branch` is another float tensor with shape [3], If's first output should have (a) no shape set, or (b) a shape of rank 1 with neither `dim_value` nor `dim_param` set, or (c) a shape of rank 1 with a unique `dim_param`. In contrast, the first output cannot have the shape [2] since [2] and [3] are not compatible.", + "name": "outputs", + "option": "variadic", + "type": "V" + } + ], + "outputs_range": "1 - ∞", + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "All Tensor types", + "type_param_str": "V" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Only bool", + "type_param_str": "B" + } + ] + } + }, + { + "name": "Imputer", + "schema": { + "attributes": [ + { + "description": "Value(s) to change to", + "name": "imputed_value_floats", + "required": false, + "type": "float32[]" + }, + { + "description": "Value(s) to change to.", + "name": "imputed_value_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "A value that needs replacing.", + "name": "replaced_value_float", + "required": false, + "type": "float32" + }, + { + "description": "A value that needs replacing.", + "name": "replaced_value_int64", + "required": false, + "type": "int64" + } + ], + 
"description": "Replaces inputs that equal one value with another, leaving all other elements alone.
    \n This operator is typically used to replace missing values in situations where they have a canonical\n representation, such as -1, 0, NaN, or some extreme value.
    \n One and only one of imputed_value_floats or imputed_value_int64s should be defined -- floats if the input tensor\n holds floats, integers if the input tensor holds integers. The imputed values must all fit within the\n width of the tensor element type. One and only one of the replaced_value_float or replaced_value_int64 should be defined,\n which one depends on whether floats or integers are being processed.
    \n The imputed_value attribute length can be 1 element, or it can have one element per input feature.
    In other words, if the input tensor has the shape [*,F], then the length of the attribute array may be 1 or F. If it is 1, then it is broadcast along the last dimension and applied to each feature.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be processed.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Imputed output data", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input type must be a tensor of a numeric type, either [N,C] or [C]. The output type will be of the same tensor type and shape.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "InstanceNormalization", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + }, + { + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid division by zero, default is 1e-5f.", + "name": "epsilon", + "required": false, + "type": "float32" + } + ], + "category": "Normalization", + "description": "Carries out instance normalization as described in the paper\nhttps://arxiv.org/abs/1607.08022.\n\ny = scale * (x - mean) / sqrt(variance + epsilon) + B,\nwhere mean and variance are computed per instance per channel.\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n axis = tuple(range(2, dims_x))\n mean = np.mean(x, axis=axis, keepdims=True)\n var = np.var(x, axis=axis, keepdims=True)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: 
(1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\ny = _instancenorm_test_mode(x, s, bias).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'InstanceNormalization',\n inputs=['x', 's', 'bias'],\n outputs=['y'],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias], outputs=[y],\n name='test_instancenorm_example')\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nepsilon = 1e-2\ny = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'InstanceNormalization',\n inputs=['x', 's', 'bias'],\n outputs=['y'],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias], outputs=[y],\n name='test_instancenorm_epsilon')", + "summary": "instancenormalization" + } + ], + "inputs": [ + { + "description": "The input 4-dimensional tensor of shape NCHW.", + "name": "input", + "type": "T" + }, + { + "description": "The input 1-dimensional scale tensor of size C.", + "name": "scale", + "type": "T" + }, + { + "description": "The input 1-dimensional bias tensor of size C.", + "name": "B", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "The output 4-dimensional tensor of the same shape as input.", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "InstanceNormalization", + "schema": { + "attributes": [ + { + "default": 9.999999747378752e-06, + "description": "The epsilon value to use to avoid 
division by zero.", + "name": "epsilon", + "required": false, + "type": "float32" + } + ], + "category": "Normalization", + "description": "Carries out instance normalization as described in the paper\nhttps://arxiv.org/abs/1607.08022.\n\ny = scale * (x - mean) / sqrt(variance + epsilon) + B,\nwhere mean and variance are computed per instance per channel.\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore\n dims_x = len(x.shape)\n axis = tuple(range(2, dims_x))\n mean = np.mean(x, axis=axis, keepdims=True)\n var = np.var(x, axis=axis, keepdims=True)\n dim_ones = (1,) * (dims_x - 2)\n s = s.reshape(-1, *dim_ones)\n bias = bias.reshape(-1, *dim_ones)\n return s * (x - mean) / np.sqrt(var + epsilon) + bias\n\n# input size: (1, 2, 1, 3)\nx = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)\ns = np.array([1.0, 1.5]).astype(np.float32)\nbias = np.array([0, 1]).astype(np.float32)\ny = _instancenorm_test_mode(x, s, bias).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'InstanceNormalization',\n inputs=['x', 's', 'bias'],\n outputs=['y'],\n)\n\n# output size: (1, 2, 1, 3)\nexpect(node, inputs=[x, s, bias], outputs=[y],\n name='test_instancenorm_example')\n\n# input size: (2, 3, 4, 5)\nx = np.random.randn(2, 3, 4, 5).astype(np.float32)\ns = np.random.randn(3).astype(np.float32)\nbias = np.random.randn(3).astype(np.float32)\nepsilon = 1e-2\ny = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'InstanceNormalization',\n inputs=['x', 's', 'bias'],\n outputs=['y'],\n epsilon=epsilon,\n)\n\n# output size: (2, 3, 4, 5)\nexpect(node, inputs=[x, s, bias], outputs=[y],\n name='test_instancenorm_epsilon')", + "summary": "instancenormalization" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H 
and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "input", + "type": "T" + }, + { + "description": "The input 1-dimensional scale tensor of size C.", + "name": "scale", + "type": "T" + }, + { + "description": "The input 1-dimensional bias tensor of size C.", + "name": "B", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "The output tensor of the same shape as input.", + "name": "output", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "IsInf", + "schema": { + "attributes": [ + { + "default": 1, + "description": "(Optional) Whether map negative infinity to true. Default to 1 so that negative infinity induces true. Set this attribute to 0 if negative infinity should be mapped to false.", + "name": "detect_negative", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "(Optional) Whether map positive infinity to true. Default to 1 so that positive infinity induces true. 
Set this attribute to 0 if positive infinity should be mapped to false.", + "name": "detect_positive", + "required": false, + "type": "int64" + } + ], + "description": "Map infinity to true and other values to false.", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node('IsInf',\n inputs=['x'],\n outputs=['y'],\n )\n\nx = np.array([-1.2, np.nan, np.inf, 2.8, np.NINF, np.inf],\n dtype=np.float32)\ny = np.isinf(x)\nexpect(node, inputs=[x], outputs=[y], name='test_isinf')", + "summary": "infinity" + }, + { + "code": "node = onnx.helper.make_node('IsInf',\n inputs=['x'],\n outputs=['y'],\n detect_positive=0\n )\n\nx = np.array([-1.7, np.nan, np.inf, -3.6, np.NINF, np.inf],\n dtype=np.float32)\ny = np.isneginf(x)\nexpect(node, inputs=[x], outputs=[y], name='test_isinf_negative')", + "summary": "negative_infinity_only" + }, + { + "code": "node = onnx.helper.make_node('IsInf',\n inputs=['x'],\n outputs=['y'],\n detect_negative=0\n )\n\nx = np.array([-1.7, np.nan, np.inf, 3.6, np.NINF, np.inf],\n dtype=np.float32)\ny = np.isposinf(x)\nexpect(node, inputs=[x], outputs=[y], name='test_isinf_positive')", + "summary": "positive_infinity_only" + } + ], + "inputs": [ + { + "description": "input", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "output", + "name": "Y", + "type": "T2" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float tensors.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrain output types to boolean tensors.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "IsNaN", + "schema": { + "description": "Returns which elements of the input are NaN.", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = 
onnx.helper.make_node(\n 'IsNaN',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([3.0, np.nan, 4.0, np.nan], dtype=np.float32)\ny = np.isnan(x)\nexpect(node, inputs=[x], outputs=[y], name='test_isnan')", + "summary": "isnan" + } + ], + "inputs": [ + { + "description": "input", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "output", + "name": "Y", + "type": "T2" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float tensors.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrain output types to boolean tensors.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "LRN", + "schema": { + "attributes": [ + { + "default": 9.999999747378752e-05, + "description": "Scaling parameter.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 0.75, + "description": "The exponent.", + "name": "beta", + "required": false, + "type": "float32" + }, + { + "default": 1.0, + "description": "", + "name": "bias", + "required": false, + "type": "float32" + }, + { + "description": "The number of channels to sum over", + "name": "size", + "required": true, + "type": "int64" + } + ], + "category": "Normalization", + "description": "Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf).\nIt normalizes over local input regions.\nThe local region is defined across the channels. 
For an element X[n, c, d1, ..., dk] in a tensor\nof shape (N x C x D1 x D2, ..., Dk), its region is\n{X[n, i, d1, ..., dk] | max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2))}.\n\nsquare_sum[n, c, d1, ..., dk] = sum(X[n, i, d1, ..., dk] ^ 2),\nwhere max(0, c - floor((size - 1) / 2)) <= i <= min(C - 1, c + ceil((size - 1) / 2)).\n\nY[n, c, d1, ..., dk] = X[n, c, d1, ..., dk] / (bias + alpha / size * square_sum[n, c, d1, ..., dk] ) ^ beta\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "alpha = 0.0001\nbeta = 0.75\nbias = 1.0\nnsize = 3\nnode = onnx.helper.make_node(\n 'LRN',\n inputs=['x'],\n outputs=['y'],\n size=3\n)\nx = np.random.randn(5, 5, 5, 5).astype(np.float32)\nsquare_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\nfor n, c, h, w in np.ndindex(x.shape):\n square_sum[n, c, h, w] = sum(x[n,\n max(0, c - int(math.floor((nsize - 1) / 2))):min(5, c + int(math.ceil((nsize - 1) / 2)) + 1),\n h,\n w] ** 2)\ny = x / ((bias + (alpha / nsize) * square_sum) ** beta)\nexpect(node, inputs=[x], outputs=[y],\n name='test_lrn_default')", + "summary": "default" + }, + { + "code": "alpha = 0.0002\nbeta = 0.5\nbias = 2.0\nnsize = 3\nnode = onnx.helper.make_node(\n 'LRN',\n inputs=['x'],\n outputs=['y'],\n alpha=alpha,\n beta=beta,\n bias=bias,\n size=nsize\n)\nx = np.random.randn(5, 5, 5, 5).astype(np.float32)\nsquare_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\nfor n, c, h, w in np.ndindex(x.shape):\n square_sum[n, c, h, w] = sum(x[n,\n max(0, c - int(math.floor((nsize - 1) / 2))):min(5, c + int(math.ceil((nsize - 1) / 2)) + 1),\n h,\n w] ** 2)\ny = x / ((bias + (alpha / nsize) * square_sum) ** beta)\nexpect(node, inputs=[x], outputs=[y],\n name='test_lrn')", + "summary": "lrn" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. 
For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor, which has the shape and type as input tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "LSTM", + "schema": { + "attributes": [ + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.For example with LeakyRelu, the default alpha is 0.01.", + "name": "activation_alpha", + "required": false, + "type": "float32[]" + }, + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.", + "name": "activation_beta", + "required": false, + "type": "float32[]" + }, + { + "description": "A list of 3 (or 6 if bidirectional) activation functions for input, output, forget, cell, and hidden. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified.", + "name": "activations", + "required": false, + "type": "string[]" + }, + { + "description": "Cell clip threshold. 
Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified.", + "name": "clip", + "required": false, + "type": "float32" + }, + { + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.", + "name": "direction", + "required": false, + "type": "string" + }, + { + "description": "Number of neurons in the hidden layer", + "name": "hidden_size", + "required": false, + "type": "int64" + }, + { + "description": "Couple the input and forget gates if 1, default 0.", + "name": "input_forget", + "required": false, + "type": "int64" + }, + { + "description": "The sequence output for the hidden is optional if 0. Default 0.", + "name": "output_sequence", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "Computes an one-layer LSTM. This operator is usually supported via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`o` - output gate\n\n`f` - forget gate\n\n`c` - cell gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates\n\n`R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates\n\n`Wb[iofc]` - W bias vectors for input, output, forget, and cell gates\n\n`Rb[iofc]` - R bias vectors for input, output, forget, and cell gates\n\n`P[iof]` - P peephole weight vector for input, output, and forget gates\n\n`WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates\n\n`RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates\n\n`WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates\n\n`RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates\n\n`PB[iof]` - P peephole weight vector for 
backward input, output, and forget gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n - it = f(Xt*(Wi^T) + Ht-1*Ri + Pi (.) Ct-1 + Wbi + Rbi)\n\n - ft = f(Xt*(Wf^T) + Ht-1*Rf + Pf (.) Ct-1 + Wbf + Rbf)\n\n - ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc + Rbc)\n\n - Ct = ft (.) Ct-1 + it (.) ct\n\n - ot = f(Xt*(Wo^T) + Ht-1*Ro + Po (.) Ct + Wbo + Rbo)\n\n - Ht = ot (.) h(Ct)\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n 'LSTM',\n inputs=['X', 'W', 'R'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\nlstm = LSTM_Helper(X=input, W=W, R=R)\n_, Y_h = lstm.step()\nexpect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_lstm_defaults')", + "summary": "defaults" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 4\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n 'LSTM',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, 
number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), 1)\n\nlstm = LSTM_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = lstm.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_lstm_with_initial_bias')", + "summary": "initial_bias" + }, + { + "code": "input = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]).astype(np.float32)\n\ninput_size = 4\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\nnumber_of_peepholes = 3\n\nnode = onnx.helper.make_node(\n 'LSTM',\n inputs=['X', 'W', 'R', 'B', 'sequence_lens', 'initial_h', 'initial_c', 'P'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\n# Initializing Inputs\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\nB = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)\nseq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)\ninit_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\ninit_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\nP = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype(np.float32)\n\nlstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h)\n_, Y_h = lstm.step()\nexpect(node, inputs=[input, W, R, B, seq_lens, init_h, init_c, P], outputs=[Y_h.astype(np.float32)],\n name='test_lstm_with_peepholes')", + "summary": "peepholes" + } + ], + "inputs": [ + { + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.", + "name": 
"X", + "type": "T" + }, + { + "description": "The weight tensor for the gates. Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape `[num_directions, 4*hidden_size, input_size]`.", + "name": "W", + "type": "T" + }, + { + "description": "The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`.", + "name": "R", + "type": "T" + }, + { + "description": "The bias tensor for input gate. Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. Optional: If not specified - assumed to be 0.", + "name": "B", + "option": "optional", + "type": "T" + }, + { + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.", + "name": "sequence_lens", + "option": "optional", + "type": "T1" + }, + { + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_h", + "option": "optional", + "type": "T" + }, + { + "description": "Optional initial value of the cell. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_c", + "option": "optional", + "type": "T" + }, + { + "description": "The weight tensor for peepholes. Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. It has shape `[num_directions, 3*hidde_size]`. 
Optional: If not specified - assumed to be 0.", + "name": "P", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "3 - 8", + "max_input": 8, + "max_output": 3, + "min_input": 3, + "min_output": 0, + "outputs": [ + { + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. It is optional if `output_sequence` is 0.", + "name": "Y", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_h", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the cell. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_c", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "0 - 3", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "LSTM", + "schema": { + "attributes": [ + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.For example with LeakyRelu, the default alpha is 0.01.", + "name": "activation_alpha", + "required": false, + "type": "float32[]" + }, + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. 
Default values are the same as of corresponding ONNX operators.", + "name": "activation_beta", + "required": false, + "type": "float32[]" + }, + { + "description": "A list of 3 (or 6 if bidirectional) activation functions for input, output, forget, cell, and hidden. The activation functions must be one of the activation functions specified above. Optional: See the equations for default if not specified.", + "name": "activations", + "required": false, + "type": "string[]" + }, + { + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. No clip if not specified.", + "name": "clip", + "required": false, + "type": "float32" + }, + { + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.", + "name": "direction", + "required": false, + "type": "string" + }, + { + "description": "Number of neurons in the hidden layer", + "name": "hidden_size", + "required": false, + "type": "int64" + }, + { + "description": "Couple the input and forget gates if 1.", + "name": "input_forget", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "Computes an one-layer LSTM. 
This operator is usually supported via some\ncustom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`o` - output gate\n\n`f` - forget gate\n\n`c` - cell gate\n\n`t` - time step (t-1 means previous time step)\n\n`W[iofc]` - W parameter weight matrix for input, output, forget, and cell gates\n\n`R[iofc]` - R recurrence weight matrix for input, output, forget, and cell gates\n\n`Wb[iofc]` - W bias vectors for input, output, forget, and cell gates\n\n`Rb[iofc]` - R bias vectors for input, output, forget, and cell gates\n\n`P[iof]` - P peephole weight vector for input, output, and forget gates\n\n`WB[iofc]` - W parameter weight matrix for backward input, output, forget, and cell gates\n\n`RB[iofc]` - R recurrence weight matrix for backward input, output, forget, and cell gates\n\n`WBb[iofc]` - W bias vectors for backward input, output, forget, and cell gates\n\n`RBb[iofc]` - R bias vectors for backward input, output, forget, and cell gates\n\n`PB[iof]` - P peephole weight vector for backward input, output, and forget gates\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Sigmoid, g=Tanh, h=Tanh):\n\n - it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)\n\n - ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)\n\n - ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)\n\n - Ct = ft (.) Ct-1 + it (.) ct\n\n - ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)\n\n - Ht = ot (.) 
h(Ct)\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n 'LSTM',\n inputs=['X', 'W', 'R'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\nlstm = LSTM_Helper(X=input, W=W, R=R)\n_, Y_h = lstm.step()\nexpect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_lstm_defaults')", + "summary": "defaults" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 4\nweight_scale = 0.1\ncustom_bias = 0.1\nnumber_of_gates = 4\n\nnode = onnx.helper.make_node(\n 'LSTM',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)\nR_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), 1)\n\nlstm = LSTM_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = lstm.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], 
name='test_lstm_with_initial_bias')", + "summary": "initial_bias" + }, + { + "code": "input = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]).astype(np.float32)\n\ninput_size = 4\nhidden_size = 3\nweight_scale = 0.1\nnumber_of_gates = 4\nnumber_of_peepholes = 3\n\nnode = onnx.helper.make_node(\n 'LSTM',\n inputs=['X', 'W', 'R', 'B', 'sequence_lens', 'initial_h', 'initial_c', 'P'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\n# Initializing Inputs\nW = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\nB = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)\nseq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)\ninit_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\ninit_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\nP = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype(np.float32)\n\nlstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h)\n_, Y_h = lstm.step()\nexpect(node, inputs=[input, W, R, B, seq_lens, init_h, init_c, P], outputs=[Y_h.astype(np.float32)],\n name='test_lstm_with_peepholes')", + "summary": "peepholes" + } + ], + "inputs": [ + { + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor for the gates. Concatenation of `W[iofc]` and `WB[iofc]` (if bidirectional) along dimension 0. The tensor has shape `[num_directions, 4*hidden_size, input_size]`.", + "name": "W", + "type": "T" + }, + { + "description": "The recurrence weight tensor. Concatenation of `R[iofc]` and `RB[iofc]` (if bidirectional) along dimension 0. 
This tensor has shape `[num_directions, 4*hidden_size, hidden_size]`.", + "name": "R", + "type": "T" + }, + { + "description": "The bias tensor for input gate. Concatenation of `[Wb[iofc], Rb[iofc]]`, and `[WBb[iofc], RBb[iofc]]` (if bidirectional) along dimension 0. This tensor has shape `[num_directions, 8*hidden_size]`. Optional: If not specified - assumed to be 0.", + "name": "B", + "option": "optional", + "type": "T" + }, + { + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.", + "name": "sequence_lens", + "option": "optional", + "type": "T1" + }, + { + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_h", + "option": "optional", + "type": "T" + }, + { + "description": "Optional initial value of the cell. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_c", + "option": "optional", + "type": "T" + }, + { + "description": "The weight tensor for peepholes. Concatenation of `P[iof]` and `PB[iof]` (if bidirectional) along dimension 0. It has shape `[num_directions, 3*hidde_size]`. Optional: If not specified - assumed to be 0.", + "name": "P", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "3 - 8", + "max_input": 8, + "max_output": 3, + "min_input": 3, + "min_output": 0, + "outputs": [ + { + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. ", + "name": "Y", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the hidden. 
It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_h", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the cell. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_c", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "0 - 3", + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "LabelEncoder", + "schema": { + "attributes": [ + { + "description": "A list of labels.", + "name": "classes_strings", + "required": false, + "type": "string[]" + }, + { + "default": -1, + "description": "An integer to use when an input string value is not found in the map.
    One and only one of the 'default_*' attributes must be defined.", + "name": "default_int64", + "required": false, + "type": "int64" + }, + { + "default": "_Unused", + "description": "A string to use when an input integer value is not found in the map.
    One and only one of the 'default_*' attributes must be defined.", + "name": "default_string", + "required": false, + "type": "string" + } + ], + "description": "Converts strings to integers and vice versa.
    \n If the string default value is set, it will convert integers to strings.\n If the int default value is set, it will convert strings to integers.
    \n Each operator converts either integers to strings or strings to integers, depending \n on which default value attribute is provided. Only one default value attribute\n should be defined.
    \n When converting from integers to strings, the string is fetched from the\n 'classes_strings' list, by simple indexing.
    \n When converting from strings to integers, the string is looked up in the list\n and the index at which it is found is used as the converted value.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Input data.", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data. If strings are input, the output values are integers, and vice versa.", + "name": "Y", + "type": "T2" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ], + "description": "The input type must be a tensor of integers or strings, of any shape.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ], + "description": "The output type will be a tensor of strings or integers, and will have the same shape as the input.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "LabelEncoder", + "schema": { + "attributes": [ + { + "description": "A float.", + "name": "default_float", + "required": false, + "type": "float32" + }, + { + "default": -1, + "description": "An integer.", + "name": "default_int64", + "required": false, + "type": "int64" + }, + { + "default": "_Unused", + "description": "A string.", + "name": "default_string", + "required": false, + "type": "string" + }, + { + "description": "A list of floats.", + "name": "keys_floats", + "required": false, + "type": "float32[]" + }, + { + "description": "A list of ints.", + "name": "keys_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "A list of strings. 
One and only one of 'keys_*'s should be set.", + "name": "keys_strings", + "required": false, + "type": "string[]" + }, + { + "description": "A list of floats.", + "name": "values_floats", + "required": false, + "type": "float32[]" + }, + { + "description": "A list of ints.", + "name": "values_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "A list of strings. One and only one of 'value_*'s should be set.", + "name": "values_strings", + "required": false, + "type": "string[]" + } + ], + "description": "Maps each element in the input tensor to another value.
    \n The mapping is determined by the two parallel attributes, 'keys_*' and\n 'values_*' attribute. The i-th value in the specified 'keys_*' attribute\n would be mapped to the i-th value in the specified 'values_*' attribute. It\n implies that input's element type and the element type of the specified\n 'keys_*' should be identical while the output type is identical to the\n specified 'values_*' attribute. If an input element can not be found in the\n specified 'keys_*' attribute, the 'default_*' that matches the specified\n 'values_*' attribute may be used as its output value.
    \n Let's consider an example which maps a string tensor to an integer tensor.\n Assume and 'keys_strings' is [\"Amy\", \"Sally\"], 'values_int64s' is [5, 6],\n and 'default_int64' is '-1'. The input [\"Dori\", \"Amy\", \"Amy\", \"Sally\",\n \"Sally\"] would be mapped to [-1, 5, 5, 6, 6].
    \n Since this operator is an one-to-one mapping, its input and output shapes\n are the same. Notice that only one of 'keys_*'/'values_*' can be set.
    \n For key look-up, bit-wise comparison is used so even a float NaN can be\n mapped to a value in 'values_*' attribute.
    \n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Input data. It can be either tensor or scalar.", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data.", + "name": "Y", + "type": "T2" + } + ], + "since_version": 2, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)" + ], + "description": "The input type is a tensor of any shape.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)" + ], + "description": "Output type is determined by the specified 'values_*' attribute.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "LeakyRelu", + "schema": { + "attributes": [ + { + "default": 0.009999999776482582, + "description": "Coefficient of leakage default to 0.01.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "category": "Activation", + "description": "LeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'LeakyRelu',\n inputs=['x'],\n outputs=['y'],\n alpha=0.1\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-0.1, 0., 1.]\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y],\n name='test_leakyrelu_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y],\n name='test_leakyrelu')", + "summary": "leakyrelu" + }, + { + "code": 
"default_alpha = 0.01\nnode = onnx.helper.make_node(\n 'LeakyRelu',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha\nexpect(node, inputs=[x], outputs=[y],\n name='test_leakyrelu_default')", + "summary": "leakyrelu_default" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "LeakyRelu", + "schema": { + "attributes": [ + { + "default": 0.009999999776482582, + "description": "Coefficient of leakage.", + "name": "alpha", + "required": false, + "type": "float32" + } + ], + "category": "Activation", + "description": "LeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'LeakyRelu',\n inputs=['x'],\n outputs=['y'],\n alpha=0.1\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-0.1, 0., 1.]\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y],\n name='test_leakyrelu_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1\nexpect(node, inputs=[x], outputs=[y],\n name='test_leakyrelu')", + "summary": "leakyrelu" + }, + { + "code": "default_alpha = 0.01\nnode = onnx.helper.make_node(\n 'LeakyRelu',\n 
inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha\nexpect(node, inputs=[x], outputs=[y],\n name='test_leakyrelu_default')", + "summary": "leakyrelu_default" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Less", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "category": "Logic", + "description": "Returns the tensor resulted from performing the `less` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. 
See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Less',\n inputs=['x', 'y'],\n outputs=['less'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less')", + "summary": "less" + }, + { + "code": "node = onnx.helper.make_node(\n 'LessOrEqual',\n inputs=['x', 'y'],\n outputs=['less_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_equal')", + "summary": "less" + }, + { + "code": "node = onnx.helper.make_node(\n 'Less',\n inputs=['x', 'y'],\n outputs=['less'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_bcast')", + "summary": "less_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'LessOrEqual',\n inputs=['x', 'y'],\n outputs=['less_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_equal_bcast')", + "summary": "less_broadcast" + } + ], + "inputs": [ + { + "description": "Left input tensor for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Right input tensor for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": 
"Constrains input to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Less", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `less` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Less',\n inputs=['x', 'y'],\n outputs=['less'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less')", + "summary": "less" + }, + { + "code": "node = onnx.helper.make_node(\n 'LessOrEqual',\n inputs=['x', 'y'],\n outputs=['less_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_equal')", + "summary": "less" + }, + { + "code": "node = onnx.helper.make_node(\n 'Less',\n inputs=['x', 'y'],\n outputs=['less'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_bcast')", + "summary": "less_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'LessOrEqual',\n inputs=['x', 'y'],\n outputs=['less_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_equal_bcast')", + "summary": "less_broadcast" + } + ], + "inputs": [ + 
{ + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Less", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `less` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Less',\n inputs=['x', 'y'],\n outputs=['less'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less')", + "summary": "less" + }, + { + "code": "node = onnx.helper.make_node(\n 'LessOrEqual',\n inputs=['x', 'y'],\n outputs=['less_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_equal')", + "summary": "less" + }, + { + "code": "node = onnx.helper.make_node(\n 'Less',\n inputs=['x', 'y'],\n outputs=['less'],\n)\n\nx = 
np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_bcast')", + "summary": "less_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'LessOrEqual',\n inputs=['x', 'y'],\n outputs=['less_equal'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = np.less_equal(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_less_equal_bcast')", + "summary": "less_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input types to all numeric tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "LessOrEqual", + "schema": { + "description": "Returns the tensor resulted from performing the `less_equal` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "First 
input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input types to all numeric tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "LinearClassifier", + "schema": { + "attributes": [ + { + "description": "Class labels when using integer labels. One and only one 'classlabels' attribute must be defined.", + "name": "classlabels_ints", + "required": false, + "type": "int64[]" + }, + { + "description": "Class labels when using string labels. One and only one 'classlabels' attribute must be defined.", + "name": "classlabels_strings", + "required": false, + "type": "string[]" + }, + { + "description": "A collection of weights of the model(s).", + "name": "coefficients", + "required": true, + "type": "float32[]" + }, + { + "description": "A collection of intercepts.", + "name": "intercepts", + "required": false, + "type": "float32[]" + }, + { + "description": "Indicates whether to do OvR or multinomial (0=OvR is the default).", + "name": "multi_class", + "required": false, + "type": "int64" + }, + { + "default": "NONE", + "description": "Indicates the transform to apply to the scores vector.
    One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'", + "name": "post_transform", + "required": false, + "type": "string" + } + ], + "description": "Linear classifier\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be classified.", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 2, + "outputs": [ + { + "description": "Classification outputs (one class per example).", + "name": "Y", + "type": "T2" + }, + { + "description": "Classification scores ([N,E] - one score for each class and example", + "name": "Z", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input must be a tensor of a numeric type, and of of shape [N,C] or [C]. In the latter case, it will be treated as [1,C]", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ], + "description": "The output will be a tensor of strings or integers.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "LinearRegressor", + "schema": { + "attributes": [ + { + "description": "Weights of the model(s).", + "name": "coefficients", + "required": false, + "type": "float32[]" + }, + { + "description": "Weights of the intercepts, if used.", + "name": "intercepts", + "required": false, + "type": "float32[]" + }, + { + "default": "NONE", + "description": "Indicates the transform to apply to the regression output vector.
    One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'", + "name": "post_transform", + "required": false, + "type": "string" + }, + { + "default": 1, + "description": "The total number of regression targets, 1 if not defined.", + "name": "targets", + "required": false, + "type": "int64" + } + ], + "description": "Generalized linear regression evaluation.
    \n If targets is set to 1 (default) then univariate regression is performed.
    \n If targets is set to M then M sets of coefficients must be passed in as a sequence\n and M results will be output for each input n in N.
    \n The coefficients array is of length n, and the coefficients for each target are contiguous.\n Intercepts are optional but if provided must match the number of targets.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be regressed.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Regression outputs (one per target, per example).", + "name": "Y", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Log", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Calculates the natural log of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Log',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([1, 10]).astype(np.float32)\ny = np.log(x) # expected output [0., 2.30258512]\nexpect(node, inputs=[x], outputs=[y],\n name='test_log_example')\n\nx = np.exp(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.log(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_log')", + "summary": "log" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The natural log of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + 
"tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Log", + "schema": { + "description": "Calculates the natural log of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Log',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([1, 10]).astype(np.float32)\ny = np.log(x) # expected output [0., 2.30258512]\nexpect(node, inputs=[x], outputs=[y],\n name='test_log_example')\n\nx = np.exp(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.log(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_log')", + "summary": "log" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The natural log of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "LogSoftmax", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Activation", + "description": "The operator computes the logsoftmax (log of softmax) values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). 
The output tensor has the same shape\nand contains the logsoftmax values of the corresponding input.\n\nInput does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output [[-2.40760589, -1.40760589, -0.40760589]]\ny = x - np.log(np.sum(np.exp(x), axis=1))\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_example_1')", + "summary": "logsoftmax" + }, + { + "code": "def logsoftmax_2d(x): # type: (np.ndarray) -> np.ndarray\n max_x = np.max(x, axis=1).reshape((-1, 1))\n exp_x = np.exp(x - max_x)\n return x - max_x - np.log(np.sum(exp_x, axis=1).reshape((-1, 1)))\n\nx = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output [[-3.4401896, -2.4401896, -1.44018972, -0.44018969],\n# [-3.4401896, -2.4401896, -1.44018972, -0.44018969]]\ny = logsoftmax_2d(x)\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_large_number')\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=0,\n)\ny = logsoftmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)\nexpect(node, 
inputs=[x], outputs=[y],\n name='test_logsoftmax_axis_0')\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=1,\n)\ny = logsoftmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_axis_1')\n\n# default axis is 1\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_default_axis')\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=2,\n)\ny = logsoftmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_axis_2')\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=-1,\n)\ny = logsoftmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_negative_axis')", + "summary": "logsoftmax_axis" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output values with the same shape as input tensor (the original size without coercion).", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "LogSoftmax", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(input).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Activation", + "description": "The operator computes the logsoftmax (log of softmax) values for each layer in the batch\n of the given input.\n\nThe input does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors. The output tensor has the same shape\nand contains the logsoftmax values of the corresponding input.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output [[-2.40760589, -1.40760589, -0.40760589]]\ny = x - np.log(np.sum(np.exp(x), axis=1))\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_example_1')", + "summary": "logsoftmax" + }, + { + "code": "def logsoftmax_2d(x): # type: (np.ndarray) -> np.ndarray\n max_x = np.max(x, axis=1).reshape((-1, 1))\n exp_x = np.exp(x - max_x)\n return x - max_x - np.log(np.sum(exp_x, axis=1).reshape((-1, 1)))\n\nx = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output [[-3.4401896, -2.4401896, -1.44018972, -0.44018969],\n# [-3.4401896, -2.4401896, -1.44018972, -0.44018969]]\ny = logsoftmax_2d(x)\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, 
inputs=[x], outputs=[y],\n name='test_logsoftmax_large_number')\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=0,\n)\ny = logsoftmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_axis_0')\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=1,\n)\ny = logsoftmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_axis_1')\n\n# default axis is 1\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_default_axis')\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=2,\n)\ny = logsoftmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_axis_2')\n\nnode = onnx.helper.make_node(\n 'LogSoftmax',\n inputs=['x'],\n outputs=['y'],\n axis=-1,\n)\ny = logsoftmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_logsoftmax_negative_axis')", + "summary": "logsoftmax_axis" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output values with the same shape as input tensor (the original size without coercion).", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Loop", + "schema": { + "attributes": [ + { + 
"description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). Each scan_output is created by concatenating the value of the specified output value at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations.", + "name": "body", + "required": true, + "type": "graph" + } + ], + "description": "Generic Looping construct. This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\n Operator inputs defined as (max_trip_count, condition_var).\n\n input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... 
// Note this value is ignored, but is required in the body\n }\n\n input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar]\n %keepgoing[BOOL, scalar]\n %b[INT32, scalar]\n ) {\n %my_local = Add(%a, %b)\n %b_out = Sub(%a, %b)\n %keepgoing_out = Greater(%my_local, %b_out)\n %user_defined_vals = Add(%b, %b)\n return %keepgoing_out, %b_out, %user_defined_vals\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n for (int i=0; i < max_trip_count && keepgoing; ++i) {\n /* User-defined code (loop body) */\n int my_local = a + b; // Reading values in the enclosing scope is fine\n b = a - b; // writes fine if we specify b as a loop-carried dependency\n keepgoing = my_local > b; // keepgoing is a loop-carried dependency\n user_defined_vals[i] = b + b;\n /* End user-defined code */\n }\n // my_local = 123; // Can't do this. 
my_local was defined in the the body\n\n // These below values are live-out from the loop and therefore accessible\n b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. variable a here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any variables which you wish to make available in the enclosing scope (i.e.\n the variables b and keepgoing) must be declared as either loop-carried\n dependencies (both at the op inputs and output and at the body net input and\n output) or scan_outputs.\n3) Values created in the body cannot be accessed in the enclosing scope.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.", + "name": "M", + "option": "optional", + "type": "I" + }, + { + "description": "A boolean termination condition. Optional. 
Pass empty string to skip.", + "name": "cond", + "option": "optional", + "type": "B" + }, + { + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)", + "name": "v_initial", + "option": "variadic", + "type": "V" + } + ], + "inputs_range": "3 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Final N loop carried dependency values then K scan_outputs", + "name": "v_final_and_scan_outputs", + "option": "variadic", + "type": "V" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "All Tensor types", + "type_param_str": "V" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B" + } + ] + } + }, + { + "name": "Loop", + "schema": { + "attributes": [ + { + "description": "The graph run each iteration. It has 2+N inputs: (iteration_num, condition, loop carried dependencies...). It has 1+N+K outputs: (condition, loop carried dependencies..., scan_outputs...). Each scan_output is created by concatenating the value of the specified output value at the end of each iteration of the loop. It is an error if the dimensions or data type of these scan_outputs change across loop iterations.", + "name": "body", + "required": true, + "type": "graph" + } + ], + "description": "Generic Looping construct. 
This loop has multiple termination conditions:\n\n1) Trip count. Iteration count specified at runtime. Set by\n specifying the input M. Optional. Set to empty string to omit.\n Note that a static trip count (specified at graph construction time) can be\n specified by passing in a constant node for input M.\n2) Loop termination condition. This is an input to the op that determines\n whether to run the first iteration and also a loop-carried dependency for\n the body graph. The body graph must yield a value for the condition variable,\n whether this input is provided or not.\n\nThis table summarizes the operating modes of this operator with equivalent\nC-style code:\n\n Operator inputs defined as (max_trip_count, condition_var).\n\n input (\"\", \"\"):\n for (int i=0; ; ++i) {\n cond = ... // Note this value is ignored, but is required in the body\n }\n\n input (\"\", cond) // Note this is analogous to a while loop\n bool cond = ...;\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (\"\", 1) // Note this is analogous to a do-while loop\n bool cond = true\n for (int i=0; cond; ++i) {\n cond = ...;\n }\n\n input (trip_count, \"\") // Note this is analogous to a for loop\n int trip_count = ...\n for (int i=0; i < trip_count; ++i) {\n cond = ...; // ignored\n }\n\n input (trip_count, cond)\n int trip_count = ...;\n bool cond = ...;\n for (int i=0; i < trip_count && cond; ++i) {\n cond = ...;\n }\n\n\n*Sample usage - cond as well as trip count*\n\n graph predict-net {\n %a = Constant[value = ]()\n %b = Constant[value = ]()\n %keepgoing = Constant[value = ]()\n %max_trip_count = Constant[value = ]()\n %keepgoing_out, %b_out, %user_defined_vals = Loop[body = ](%max_trip_count, %keepgoing, %b)\n return\n }\n\n graph body-net (\n %i[INT32, scalar] // iteration number\n %keepgoing_in[BOOL, scalar] // incoming loop-termination-condition; not used\n %b_in[INT32, scalar] // incoming value of loop-carried-dependency b\n ) {\n %my_local = Add(%a, %b_in)\n %b_out = Sub(%a, 
%b_in) // outgoing value of loop-carried-dependency b\n %keepgoing_out = Greater(%my_local, %b_out) // outgoing loop-termination-condition\n %user_defined_val = Add(%b_in, %b_in) // scan-output value to be accumulated\n return %keepgoing_out, %b_out, %user_defined_val\n }\n\n*Sample equivalent C code*\n\n {\n /* User-defined code (enclosing scope) */\n int a = 3, b = 6;\n bool keepgoing = true; // Analogous to input cond\n /* End user-defined code */\n\n /* Implicitly-defined code */\n const int max_trip_count = 10; // Analogous to input M\n int user_defined_vals[]; // Imagine this is resizable\n /* End implicitly-defined code */\n /* initialize loop-carried variables and scan-output variables */\n bool keepgoing_out = keepgoing\n int b_out = b\n\n for (int i=0; i < max_trip_count && keepgoing_out; ++i) {\n /* Implicitly-defined code: bind actual parameter values\n to formal parameter variables of loop-body */\n bool keepgoing_in = keepgoing_out; \n bool b_in = b_out;\n\n /* User-defined code (loop body) */\n int my_local = a + b_in; // Reading value \"a\" from the enclosing scope is fine\n b_out = a - b_in;\n keepgoing_out = my_local > b_out; \n user_defined_val = b_in + b_in; // b_in and b_out are different variables\n /* End user-defined code */\n\n /* Implicitly defined-code */\n user_defined_vals[i] = user_defined_val // accumulate scan-output values\n }\n // int t = my_local; // Can't do this. my_local is not accessible here.\n\n // The values below are bound to the output variables of the loop and therefore accessible\n // b_out; user_defined_vals; keepgoing_out;\n }\n\nThere are several things of note in this code snippet:\n\n1) Values from the enclosing scope (i.e. 
variable \"a\" here) are in scope and can\n be referenced in the inputs of the loop.\n2) Any values computed in the loop body that needs to be used in a subsequent\n iteration or after the loop are modelled using a pair of variables in the loop-body,\n consisting of an input variable (eg., b_in) and an output variable (eg., b_out).\n These are referred to as loop-carried dependences. The loop operation node\n supplies the input value of the input variable for the first iteration, and\n returns the output value of the output variable produced by the final\n iteration.\n3) Scan_output variables are used to implicitly concatenate values computed across\n all the iterations. In the above example, the value of user_defined_val computed\n over all iterations are concatenated and returned as the value of user_defined_vals\n after the loop.\n4) Values created in the body cannot be accessed in the enclosing scope,\n except using the mechanism described above.\n\nNote that the semantics of this op support \"diagonal\" or \"wavefront\" execution.\n(See Step 3 here for an example:\nhttps://devblogs.nvidia.com/optimizing-recurrent-neural-networks-cudnn-5/).\nFrontends should emit multi-layer RNNs as a series of While operators (with\ntime being the inner looping dimension), with each successive layer consuming\nthe scan_outputs from the previous layer, possibly going through several\npoint-wise operators (e.g. dropout, residual connections, linear layer).\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "A maximum trip-count for the loop specified at runtime. Optional. Pass empty string to skip.", + "name": "M", + "option": "optional", + "type": "I" + }, + { + "description": "A boolean termination condition. Optional. 
Pass empty string to skip.", + "name": "cond", + "option": "optional", + "type": "B" + }, + { + "description": "The initial values of any loop-carried dependencies (values that change across loop iterations)", + "name": "v_initial", + "option": "variadic", + "type": "V" + } + ], + "inputs_range": "2 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Final N loop carried dependency values then K scan_outputs", + "name": "v_final_and_scan_outputs", + "option": "variadic", + "type": "V" + } + ], + "outputs_range": "1 - ∞", + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "All Tensor types", + "type_param_str": "V" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "tensor of int64, which should be a scalar.", + "type_param_str": "I" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "tensor of bool, which should be a scalar.", + "type_param_str": "B" + } + ] + } + }, + { + "name": "LpNormalization", + "schema": { + "attributes": [ + { + "default": -1, + "description": "The axis on which to apply normalization, -1 mean last axis.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 2, + "description": "The order of the normalization, only 1 or 2 are supported.", + "name": "p", + "required": false, + "type": "int64" + } + ], + "category": "Normalization", + "description": "Given a matrix, apply Lp-normalization along the provided axis.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input matrix", + "name": "input", + "type": "T" + 
} + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Matrix after normalization", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "LpPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding. DEPRECATION NOTE: auto_pad is only intended to support legacy uses, and for framework authors, one is explicitly encouraged to use explicit padding specified in the pads attribute.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": false, + "type": "int64[]" + }, + { + "default": 2.0, + "description": "p value of the Lp norm used to pool over the input data, default is 2.0.", + "name": "p", + "required": false, + "type": "float32" + }, + { + "description": "Padding for the beginning and ending along each axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. 
This attribute cannot be used simultaneously with auto_pad attribute.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "LpPool consumes an input tensor X and applies Lp pooling across the\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Lp pooling consisting of computing the Lp norm on all values of a subset\n of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing.", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimension are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "LpPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. 
SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "default": 2, + "description": "p value of the Lp norm used to pool over the input data.", + "name": "p", + "required": false, + "type": "int64" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. 
If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "LpPool consumes an input tensor X and applies Lp pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Lp pooling consisting of computing the Lp norm on all values of a subset\n of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing.", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from Lp pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.", + "name": "Y", + "type": "T" + } + ], + "since_version": 2, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "LpPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. 
SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "default": 2, + "description": "p value of the Lp norm used to pool over the input data.", + "name": "p", + "required": false, + "type": "int64" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis. 
If not present, the stride defaults to 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "LpPool consumes an input tensor X and applies Lp pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n Lp pooling consisting of computing the Lp norm on all values of a subset\n of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing.", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from Lp pooling across the input tensor. 
Dimensions will vary based on various kernel, stride, and pad sizes.", + "name": "Y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "MatMul", + "schema": { + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'MatMul',\n inputs=['a', 'b'],\n outputs=['c'],\n)\n\n# 2d\na = np.random.randn(3, 4).astype(np.float32)\nb = np.random.randn(4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c],\n name='test_matmul_2d')\n\n# 3d\na = np.random.randn(2, 3, 4).astype(np.float32)\nb = np.random.randn(2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c],\n name='test_matmul_3d')\n\n# 4d\na = np.random.randn(1, 2, 3, 4).astype(np.float32)\nb = np.random.randn(1, 2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c],\n name='test_matmul_4d')", + "summary": "matmul" + } + ], + "inputs": [ + { + "description": "N-dimensional matrix A", + "name": "A", + "type": "T" + }, + { + "description": "N-dimensional matrix B", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Matrix multiply results from A * B", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "MatMul", + 
"schema": { + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'MatMul',\n inputs=['a', 'b'],\n outputs=['c'],\n)\n\n# 2d\na = np.random.randn(3, 4).astype(np.float32)\nb = np.random.randn(4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c],\n name='test_matmul_2d')\n\n# 3d\na = np.random.randn(2, 3, 4).astype(np.float32)\nb = np.random.randn(2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c],\n name='test_matmul_3d')\n\n# 4d\na = np.random.randn(1, 2, 3, 4).astype(np.float32)\nb = np.random.randn(1, 2, 4, 3).astype(np.float32)\nc = np.matmul(a, b)\nexpect(node, inputs=[a, b], outputs=[c],\n name='test_matmul_4d')", + "summary": "matmul" + } + ], + "inputs": [ + { + "description": "N-dimensional matrix A", + "name": "A", + "type": "T" + }, + { + "description": "N-dimensional matrix B", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Matrix multiply results from A * B", + "name": "Y", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "MatMulInteger", + "schema": { + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.\nThe production MUST never overflow. 
The accumulation may overflow if and only if in 32 bits.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node('MatMulInteger',\n inputs=['A', 'B', 'a_zero_point', 'b_zero_point'],\n outputs=['Y'],)\n\nA = np.array([[11, 7, 3],\n [10, 6, 2],\n [9, 5, 1],\n [8, 4, 0], ], dtype=np.uint8)\n\na_zero_point = np.array([12], dtype=np.uint8)\n\nB = np.array([[1, 4],\n [2, 5],\n [3, 6], ], dtype=np.uint8)\n\nb_zero_point = np.array([0], dtype=np.uint8)\n\noutput = np.array([[-38, -83],\n [-44, -98],\n [-50, -113],\n [-56, -128], ], dtype=np.int32)\n\nexpect(node, inputs=[A, B, a_zero_point, b_zero_point], outputs=[output],\n name='test_matmulinteger')", + "summary": "matmulinteger" + } + ], + "inputs": [ + { + "description": "N-dimensional matrix A", + "name": "A", + "type": "T1" + }, + { + "description": "N-dimensional matrix B", + "name": "B", + "type": "T2" + }, + { + "description": "Zero point tensor for input 'A'. It's optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor or per-row quantization. If it's a 1-D tensor, its number of elements should be equal to the number of rows of input 'A'.", + "name": "a_zero_point", + "option": "optional", + "type": "T1" + }, + { + "description": "Zero point tensor for input 'B'. It's optional and default value is 0. It could be a scalar or a 1-D tensor, which means a per-tensor or per-column quantization. 
If it's a 1-D tensor, its number of elements should be equal to the number of columns of input 'B'.", + "name": "b_zero_point", + "option": "optional", + "type": "T2" + } + ], + "inputs_range": "2 - 4", + "max_input": 4, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Matrix multiply results from A * B", + "name": "Y", + "type": "T3" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain input A data type to 8-bit integer tensor.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain input B data type to 8-bit integer tensor.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain output Y data type as 32-bit integer tensor.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "Max", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Element-wise max of each of the input tensors. 
All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_max_example')\n\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_max_one_input')\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_two_inputs')", + "summary": "max" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_{0}'.format(np.dtype(op_dtype).name))", + "summary": "max_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for Max.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor. 
Same dimension as inputs.", + "name": "max", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Max", + "schema": { + "description": "Element-wise max of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_max_example')\n\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_max_one_input')\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_two_inputs')", + "summary": "max" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_{0}'.format(np.dtype(op_dtype).name))", + "summary": "max_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for Max.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + 
"max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor. Same dimension as inputs.", + "name": "max", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Max", + "schema": { + "description": "Element-wise max of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_max_example')\n\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_max_one_input')\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_two_inputs')", + "summary": "max" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n 
outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_{0}'.format(np.dtype(op_dtype).name))", + "summary": "max_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for max.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "max", + "type": "T" + } + ], + "since_version": 8, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Max", + "schema": { + "description": "Element-wise max of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 3]).astype(np.float32)\nresult = np.array([3, 5, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_max_example')\n\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_max_one_input')\n\nresult = np.maximum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n 
name='test_max_two_inputs')", + "summary": "max" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([3, 4, 4]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Max',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_max_{0}'.format(np.dtype(op_dtype).name))", + "summary": "max_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for max.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "max", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "MaxPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. 
VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad.\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_1d_default')", + "summary": "maxpool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_ceil')", + "summary": "maxpool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = 
np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_default')", + "summary": "maxpool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_dilations')", + "summary": "maxpool_2d_dilations" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_pads')", + "summary": "maxpool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = 
np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_pads')", + "summary": "maxpool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9, 10],\n [17, 19, 20],\n [22, 24, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_same_upper')", + "summary": "maxpool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_strides')", + "summary": "maxpool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = 
np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_lower')", + "summary": "maxpool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_upper')", + "summary": "maxpool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 
5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_strides')", + "summary": "maxpool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.uint8)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_uint8')", + "summary": "maxpool_2d_uint8" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_3d_default')", + "summary": "maxpool_3d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = 
np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\nz = np.array([[[\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_pads')", + "summary": "maxpool_with_argmax_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16],\n [8, 18]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_strides')", + "summary": "maxpool_with_argmax_2d_precomputed_strides" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. 
Floor value of the dimension is used", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "MaxPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "The storage order of the tensor. 
0 is row major, and 1 is column major.", + "name": "storage_order", + "required": false, + "type": "int64" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - kernel_spatial_shape[i]) / strides_spatial_shape[i] + 1)\n\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - kernel_spatial_shape[i] + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + kernel_spatial_shape[i] - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad.\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 
'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_1d_default')", + "summary": "maxpool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_ceil')", + "summary": "maxpool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_default')", + "summary": "maxpool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_dilations')", + "summary": "maxpool_2d_dilations" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = 
np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_pads')", + "summary": "maxpool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_pads')", + "summary": "maxpool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9, 10],\n [17, 19, 20],\n [22, 24, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_same_upper')", + "summary": "maxpool_2d_precomputed_same_upper" + }, + { + "code": 
"\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_strides')", + "summary": "maxpool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_lower')", + "summary": "maxpool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, 
strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_upper')", + "summary": "maxpool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_strides')", + "summary": "maxpool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.uint8)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_uint8')", + "summary": "maxpool_2d_uint8" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n 
outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_3d_default')", + "summary": "maxpool_3d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\nz = np.array([[[\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_pads')", + "summary": "maxpool_with_argmax_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16],\n [8, 18]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_strides')", + "summary": "maxpool_with_argmax_2d_precomputed_strides" + 
} + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used", + "name": "Y", + "type": "T" + }, + { + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn).", + "name": "Indices", + "option": "optional", + "type": "I" + } + ], + "outputs_range": "1 - 2", + "since_version": 8, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "I" + } + ] + } + }, + { + "name": "MaxPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. 
Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "Whether to use ceil or floor (default) to compute the output shape.", + "name": "ceil_mode", + "required": false, + "type": "int64" + }, + { + "description": "Dilation value along each spatial axis of filter.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "The storage order of the tensor. 
0 is row major, and 1 is column major.", + "name": "storage_order", + "required": false, + "type": "int64" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. 
If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad.\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_1d_default')", + "summary": "maxpool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_ceil')", + "summary": "maxpool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 
32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_default')", + "summary": "maxpool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_dilations')", + "summary": "maxpool_2d_dilations" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_pads')", + "summary": "maxpool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 
5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_pads')", + "summary": "maxpool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9, 10],\n [17, 19, 20],\n [22, 24, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_same_upper')", + "summary": "maxpool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_strides')", + "summary": "maxpool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 
32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_lower')", + "summary": "maxpool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_upper')", + "summary": "maxpool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = 
np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_strides')", + "summary": "maxpool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.uint8)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_uint8')", + "summary": "maxpool_2d_uint8" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_3d_default')", + "summary": "maxpool_3d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 
25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\nz = np.array([[[\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_pads')", + "summary": "maxpool_with_argmax_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16],\n [8, 18]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_strides')", + "summary": "maxpool_with_argmax_2d_precomputed_strides" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. 
Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used", + "name": "Y", + "type": "T" + }, + { + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn).", + "name": "Indices", + "option": "optional", + "type": "I" + } + ], + "outputs_range": "1 - 2", + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "I" + } + ] + } + }, + { + "name": "MaxPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "Whether to use ceil or floor (default) to compute the output shape.", + "name": "ceil_mode", + "required": false, + "type": "int64" + }, + { + "description": "Dilation value along each spatial axis of filter. 
If not present, the dilation defaults to 1 along each spatial axis.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "The storage order of the tensor. 0 is row major, and 1 is column major.", + "name": "storage_order", + "required": false, + "type": "int64" + }, + { + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. 
The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad.\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_1d_default')", + "summary": "maxpool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n 
ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_ceil')", + "summary": "maxpool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_default')", + "summary": "maxpool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_dilations')", + "summary": "maxpool_2d_dilations" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = 
np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_pads')", + "summary": "maxpool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_pads')", + "summary": "maxpool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9, 10],\n [17, 19, 20],\n [22, 24, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_same_upper')", + "summary": "maxpool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 
25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_strides')", + "summary": "maxpool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_lower')", + "summary": "maxpool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), 
mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_upper')", + "summary": "maxpool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_strides')", + "summary": "maxpool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.uint8)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_uint8')", + "summary": "maxpool_2d_uint8" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 
0, 0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_3d_default')", + "summary": "maxpool_3d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\nz = np.array([[[\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_pads')", + "summary": "maxpool_with_argmax_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16],\n [8, 18]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_strides')", + "summary": "maxpool_with_argmax_2d_precomputed_strides" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... 
Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used", + "name": "Y", + "type": "T" + }, + { + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... x Dn).", + "name": "Indices", + "option": "optional", + "type": "I" + } + ], + "outputs_range": "1 - 2", + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "I" + } + ] + } + }, + { + "name": "MaxPool", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. 
VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "Whether to use ceil or floor (default) to compute the output shape.", + "name": "ceil_mode", + "required": false, + "type": "int64" + }, + { + "description": "Dilation value along each spatial axis of filter. If not present, the dilation defaults to 1 along each spatial axis.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "The storage order of the tensor. 0 is row major, and 1 is column major.", + "name": "storage_order", + "required": false, + "type": "int64" + }, + { + "description": "Stride along each spatial axis. 
If not present, the stride defaults to 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "category": "Pool", + "description": "MaxPool consumes an input tensor X and applies max pooling across\n the tensor according to kernel sizes, stride sizes, and pad lengths.\n max pooling consisting of computing the max on all values of a\n subset of the input tensor according to the kernel size and downsampling the\n data into the output tensor Y for further processing. The output spatial shape will be following:\n ```\n output_spatial_shape[i] = floor((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n or\n ```\n output_spatial_shape[i] = ceil((input_spatial_shape[i] + pad_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1)) / strides_spatial_shape[i] + 1)\n ```\n if ceil_mode is enabled\n\n ```\n * pad_shape[i] is sum of pads along axis i\n ```\n\n `auto_pad` is a DEPRECATED attribute. If you are using them currently, the output spatial shape will be following:\n ```\n VALID: output_spatial_shape[i] = ceil((input_spatial_shape[i] - ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) + 1) / strides_spatial_shape[i])\n SAME_UPPER or SAME_LOWER: output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides_spatial_shape[i])\n ```\n And pad shape will be following if `SAME_UPPER` or `SAME_LOWER`:\n ```\n pad_shape[i] = (output_spatial_shape[i] - 1) * strides_spatial_shape[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]\n ```\n The output of each pooling window is maximum number of elements exclude pad. 
\n ", + "domain": "ai.onnx", + "examples": [ + { + "code": "\"\"\"\ninput_shape: [1, 3, 32]\noutput_shape: [1, 3, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2],\n)\nx = np.random.randn(1, 3, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2]\nstrides = [1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_1d_default')", + "summary": "maxpool_1d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n ceil_mode=True\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_ceil')", + "summary": "maxpool_2d_ceil" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_default')", + "summary": "maxpool_2d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 4, 4]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[1, 1],\n dilations=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n 
[13, 14, 15, 16],\n]]]).astype(np.float32)\ny = np.array([[[\n [11, 12],\n [15, 16]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_dilations')", + "summary": "maxpool_2d_dilations" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 28, 28]\noutput_shape: [1, 3, 30, 30]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n pads=[2, 2, 2, 2]\n)\nx = np.random.randn(1, 3, 28, 28).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (3, 3)\nstrides = (1, 1)\npad_bottom = pad_top = pad_right = pad_left = 2\npad_shape = [pad_top + pad_bottom, pad_left + pad_right]\nout_shape = get_output_shape('VALID', np.add(x_shape[2:], pad_shape), kernel_shape, strides)\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_pads')", + "summary": "maxpool_2d_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_pads')", + "summary": "maxpool_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 3, 3]\npad_shape: [2, 2] -> [1, 1, 1, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n 
inputs=['x'],\n outputs=['y'],\n kernel_shape=[3, 3],\n strides=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9, 10],\n [17, 19, 20],\n [22, 24, 25]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_same_upper')", + "summary": "maxpool_2d_precomputed_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_precomputed_strides')", + "summary": "maxpool_2d_precomputed_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [1, 0, 1, 0] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_LOWER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_LOWER', x_shape[2:], kernel_shape, strides, out_shape)\npad_bottom = pad_shape[0] // 2\npad_top = pad_shape[0] - pad_bottom\npad_right = pad_shape[1] // 2\npad_left = pad_shape[1] - pad_right\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], 
name='test_maxpool_2d_same_lower')", + "summary": "maxpool_2d_same_lower" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 32, 32]\npad_shape: [1, 1] -> [0, 1, 0, 1] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2],\n auto_pad='SAME_UPPER'\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (2, 2)\nstrides = (1, 1)\nout_shape = get_output_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides)\npad_shape = get_pad_shape('SAME_UPPER', x_shape[2:], kernel_shape, strides, out_shape)\npad_top = pad_shape[0] // 2\npad_bottom = pad_shape[0] - pad_top\npad_left = pad_shape[1] // 2\npad_right = pad_shape[1] - pad_left\npadded = np.pad(x, ((0, 0), (0, 0), (pad_top, pad_bottom), (pad_left, pad_right)), mode='constant',\n constant_values=np.nan)\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, pad_shape, 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_same_upper')", + "summary": "maxpool_2d_same_upper" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32]\noutput_shape: [1, 3, 10, 10]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n strides=[3, 3]\n)\nx = np.random.randn(1, 3, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = (5, 5)\nstrides = (3, 3)\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, (0, 0), 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_strides')", + "summary": "maxpool_2d_strides" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n 
[11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.uint8)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.uint8)\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_2d_uint8')", + "summary": "maxpool_2d_uint8" + }, + { + "code": "\"\"\"\ninput_shape: [1, 3, 32, 32, 32]\noutput_shape: [1, 3, 31, 31, 31]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y'],\n kernel_shape=[2, 2, 2],\n)\nx = np.random.randn(1, 3, 32, 32, 32).astype(np.float32)\nx_shape = np.shape(x)\nkernel_shape = [2, 2, 2]\nstrides = [1, 1, 1]\nout_shape = get_output_shape('VALID', x_shape[2:], kernel_shape, strides)\npadded = x\ny = pool(padded, x_shape, kernel_shape, strides, out_shape, [0, 0, 0], 'MAX')\n\nexpect(node, inputs=[x], outputs=[y], name='test_maxpool_3d_default')", + "summary": "maxpool_3d_default" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 5, 5]\npad_shape: [4, 4] -> [2, 2, 2, 2] by axis\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[5, 5],\n pads=[2, 2, 2, 2]\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[\n [13, 14, 15, 15, 15],\n [18, 19, 20, 20, 20],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25],\n [23, 24, 25, 25, 25]]]]).astype(np.float32)\nz = np.array([[[\n [12, 13, 14, 14, 14],\n [17, 18, 19, 19, 19],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24],\n [22, 23, 24, 24, 24]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_pads')", + "summary": "maxpool_with_argmax_2d_precomputed_pads" + }, + { + "code": "\"\"\"\ninput_shape: [1, 1, 5, 5]\noutput_shape: [1, 1, 2, 2]\n\"\"\"\nnode = onnx.helper.make_node(\n 'MaxPool',\n 
inputs=['x'],\n outputs=['y', 'z'],\n kernel_shape=[2, 2],\n strides=[2, 2],\n storage_order=1\n)\nx = np.array([[[\n [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25],\n]]]).astype(np.float32)\ny = np.array([[[[7, 9],\n [17, 19]]]]).astype(np.float32)\nz = np.array([[[[6, 16],\n [8, 18]]]]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y, z], name='test_maxpool_with_argmax_2d_precomputed_strides')", + "summary": "maxpool_with_argmax_2d_precomputed_strides" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor from average or max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Floor value of the dimension is used", + "name": "Y", + "type": "T" + }, + { + "description": "Indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. The values in indices of are the indices of the selected values during pooling. The indices are computed as flatten 1-D tensor, and the indices do not consider padding. So the values in indices are in [0, N x C x D1 x ... 
x Dn).", + "name": "Indices", + "option": "optional", + "type": "I" + } + ], + "outputs_range": "1 - 2", + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain input and output types to float and 8 bit tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "I" + } + ] + } + }, + { + "name": "MaxRoiPool", + "schema": { + "attributes": [ + { + "description": "ROI pool output shape (height, width).", + "name": "pooled_shape", + "required": true, + "type": "int64[]" + }, + { + "default": 1.0, + "description": "Multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling.", + "name": "spatial_scale", + "required": false, + "type": "float32" + } + ], + "category": "Pool", + "description": "ROI max pool consumes an input tensor X and region of interests (RoIs) to\n apply max pooling across each RoI, to produce output 4-D tensor of shape\n (num_rois, channels, pooled_shape[0], pooled_shape[1]).", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.", + "name": "X", + "type": "T" + }, + { + "description": "RoIs (Regions of Interest) to pool over. 
Should be a 2-D tensor of shape (num_rois, 5) given as [[batch_id, x1, y1, x2, y2], ...].", + "name": "rois", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "RoI pooled output 4-D tensor of shape (num_rois, channels, pooled_shape[0], pooled_shape[1]).", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "MaxUnpool", + "schema": { + "attributes": [ + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the the output information from a MaxPool op. The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. 
The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. The attributes typically have the same values as the corrsponding\n pooling op that the unpooling op is trying to invert.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'MaxUnpool',\n inputs=['xT', 'xI', 'output_shape'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nxT = np.array([[[[5, 6],\n [7, 8]]]], dtype=np.float32)\nxI = np.array([[[[5, 7],\n [13, 15]]]], dtype=np.int64)\noutput_shape = np.array((1, 1, 5, 5), dtype=np.int64)\ny = np.array([[[[0, 0, 0, 0, 0],\n [0, 5, 0, 6, 0],\n [0, 0, 0, 0, 0],\n [0, 7, 0, 8, 0],\n [0, 0, 0, 0, 0]]]], dtype=np.float32)\nexpect(node, inputs=[xT, xI, output_shape], outputs=[y], name='test_maxunpool_export_with_output_shape')", + "summary": "with_output_shape" + }, + { + "code": "node = onnx.helper.make_node(\n 'MaxUnpool',\n inputs=['xT', 'xI'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nxT = np.array([[[[1, 2],\n [3, 4]]]], dtype=np.float32)\nxI = np.array([[[[5, 7],\n [13, 
15]]]], dtype=np.int64)\ny = np.array([[[[0, 0, 0, 0],\n [0, 1, 0, 2],\n [0, 0, 0, 0],\n [0, 3, 0, 4]]]], dtype=np.float32)\nexpect(node, inputs=[xT, xI], outputs=[y], name='test_maxunpool_export_without_output_shape')", + "summary": "without_output_shape" + } + ], + "inputs": [ + { + "description": "Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op.Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T1" + }, + { + "description": "Input data tensor containing the indices corresponding to elements in the first input tensor X.This tensor is typically the second output of the MaxPool op.Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x ... x Dn).", + "name": "I", + "type": "T2" + }, + { + "description": "The shape of the output can be explicitly set which will cause pads values to be auto generated. 
If 'output_shape' is specified, 'pads' values are ignored.", + "name": "output_shape", + "option": "optional", + "type": "T2" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the unpooling.", + "name": "output", + "type": "T1" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "MaxUnpool", + "schema": { + "attributes": [ + { + "description": "The size of the kernel along each axis.", + "name": "kernel_shape", + "required": true, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis. 
If not present, the stride defaults to 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the the output information from a MaxPool op. The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. 
The attributes typically have the same values as the corrsponding\n pooling op that the unpooling op is trying to invert.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'MaxUnpool',\n inputs=['xT', 'xI', 'output_shape'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nxT = np.array([[[[5, 6],\n [7, 8]]]], dtype=np.float32)\nxI = np.array([[[[5, 7],\n [13, 15]]]], dtype=np.int64)\noutput_shape = np.array((1, 1, 5, 5), dtype=np.int64)\ny = np.array([[[[0, 0, 0, 0, 0],\n [0, 5, 0, 6, 0],\n [0, 0, 0, 0, 0],\n [0, 7, 0, 8, 0],\n [0, 0, 0, 0, 0]]]], dtype=np.float32)\nexpect(node, inputs=[xT, xI, output_shape], outputs=[y], name='test_maxunpool_export_with_output_shape')", + "summary": "with_output_shape" + }, + { + "code": "node = onnx.helper.make_node(\n 'MaxUnpool',\n inputs=['xT', 'xI'],\n outputs=['y'],\n kernel_shape=[2, 2],\n strides=[2, 2]\n)\nxT = np.array([[[[1, 2],\n [3, 4]]]], dtype=np.float32)\nxI = np.array([[[[5, 7],\n [13, 15]]]], dtype=np.int64)\ny = np.array([[[[0, 0, 0, 0],\n [0, 1, 0, 2],\n [0, 0, 0, 0],\n [0, 3, 0, 4]]]], dtype=np.float32)\nexpect(node, inputs=[xT, xI], outputs=[y], name='test_maxunpool_export_without_output_shape')", + "summary": "without_output_shape" + } + ], + "inputs": [ + { + "description": "Input data tensor that has to be unpooled. This tensor is typically the first output of the MaxPool op.Dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non-image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. 
Optionally, if dimension denotation is in effect, the operation expects the input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "X", + "type": "T1" + }, + { + "description": "Input data tensor containing the indices corresponding to elements in the first input tensor X.This tensor is typically the second output of the MaxPool op.Dimensions must be the same as input tensor X. The indices are linear, i.e. computed considering the tensor as flattened 1-D tensor, assuming row-major storage. Also, the linear indices should not consider padding. So the values in indices are in the range [0, N x C x D1 x ... x Dn).", + "name": "I", + "type": "T2" + }, + { + "description": "The shape of the output can be explicitly set which will cause pads values to be auto generated. If 'output_shape' is specified, 'pads' values are ignored.", + "name": "output_shape", + "option": "optional", + "type": "T2" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the unpooling.", + "name": "output", + "type": "T1" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Mean", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Element-wise mean of each of the input tensors. 
All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([2, 3, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_mean_example')\n\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_mean_one_input')\n\nresult = np.divide(np.add(data_0, data_1), 2.)\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_mean_two_inputs')", + "summary": "mean" + } + ], + "inputs": [ + { + "description": "List of tensors for Mean.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor. Same dimension as inputs.", + "name": "mean", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Mean", + "schema": { + "description": "Element-wise mean of each of the input tensors. 
All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([2, 3, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_mean_example')\n\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_mean_one_input')\n\nresult = np.divide(np.add(data_0, data_1), 2.)\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_mean_two_inputs')", + "summary": "mean" + } + ], + "inputs": [ + { + "description": "List of tensors for Mean.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor. 
Same dimension as inputs.", + "name": "mean", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Mean", + "schema": { + "description": "Element-wise mean of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([2, 3, 4]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_mean_example')\n\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_mean_one_input')\n\nresult = np.divide(np.add(data_0, data_1), 2.)\nnode = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_mean_two_inputs')", + "summary": "mean" + } + ], + "inputs": [ + { + "description": "List of tensors for mean.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "mean", + "type": "T" + } + ], + "since_version": 8, + "support_level": 
"common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "MeanVarianceNormalization", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to caculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance.", + "name": "axes", + "required": false, + "type": "int64[]" + } + ], + "description": "A MeanVarianceNormalization Function: Perform mean variance normalization\n on the input tensor X using formula:
    ``` (X-EX)/sqrt(E(X-EX)^2) ```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'MeanVarianceNormalization',\n inputs=['X'],\n outputs=['Y']\n)\n\ninput_data = np.array([[[[0.8439683], [0.5665144], [0.05836735]],\n [[0.02916367], [0.12964272], [0.5060197]],\n [[0.79538304], [0.9411346], [0.9546573]]],\n [[[0.17730942], [0.46192095], [0.26480448]],\n [[0.6746842], [0.01665257], [0.62473077]],\n [[0.9240844], [0.9722341], [0.11965699]]],\n [[[0.41356155], [0.9129373], [0.59330076]],\n [[0.81929934], [0.7862604], [0.11799799]],\n [[0.69248444], [0.54119414], [0.07513223]]]], dtype=np.float32)\n\n# Calculate expected output data\ndata_mean = np.mean(input_data, axis=(0, 2, 3), keepdims=1)\ndata_mean_squared = np.power(data_mean, 2)\ndata_squared = np.power(input_data, 2)\ndata_squared_mean = np.mean(data_squared, axis=(0, 2, 3), keepdims=1)\nstd = np.sqrt(data_squared_mean - data_mean_squared)\nexpected_output = (input_data - data_mean) / (std + 1e-9)\n\nexpect(node, inputs=[input_data], outputs=[expected_output],\n name='test_mvn')", + "summary": "meanvariancenormalization" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Min", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Element-wise min of each of the input tensors. 
All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_min_example')\n\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_min_one_input')\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_min_two_inputs')", + "summary": "min" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_min_{0}'.format(np.dtype(op_dtype).name))", + "summary": "min_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for Min", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor. 
Same dimension as inputs.", + "name": "min", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Min", + "schema": { + "description": "Element-wise min of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_min_example')\n\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_min_one_input')\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_min_two_inputs')", + "summary": "min" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_min_{0}'.format(np.dtype(op_dtype).name))", + "summary": "min_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for Min", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + 
"max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor. Same dimension as inputs.", + "name": "min", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Min", + "schema": { + "description": "Element-wise min of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_min_example')\n\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_min_one_input')\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_min_two_inputs')", + "summary": "min" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n 
outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_min_{0}'.format(np.dtype(op_dtype).name))", + "summary": "min_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for min.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "min", + "type": "T" + } + ], + "since_version": 8, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Min", + "schema": { + "description": "Element-wise min of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 2, 1]).astype(np.float32)\ndata_1 = np.array([1, 4, 4]).astype(np.float32)\ndata_2 = np.array([2, 5, 0]).astype(np.float32)\nresult = np.array([1, 2, 0]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_min_example')\n\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_min_one_input')\n\nresult = np.minimum(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n 
name='test_min_two_inputs')", + "summary": "min" + }, + { + "code": "for op_dtype in all_numeric_dtypes:\n data_0 = np.array([3, 2, 1]).astype(op_dtype)\n data_1 = np.array([1, 4, 4]).astype(op_dtype)\n result = np.array([1, 2, 1]).astype(op_dtype)\n node = onnx.helper.make_node(\n 'Min',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_min_{0}'.format(np.dtype(op_dtype).name))", + "summary": "min_all_numeric_types" + } + ], + "inputs": [ + { + "description": "List of tensors for min.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "min", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Mod", + "schema": { + "attributes": [ + { + "description": "Whether the operator should behave like fmod (default=0 meaning it will do integer mods); Set this to 1 to force fmod treatment", + "name": "fmod", + "required": false, + "type": "int64" + } + ], + "description": "Performs element-wise binary modulus (with Numpy-style broadcasting support). \n The sign of the remainder is the same as that of the Divisor.\n \n Mod operator can also behave like C fmod() or numpy.fmod. In this case, the sign of the remainder however, will be the same as the Dividend \n (in contrast to integer mod). 
To force a behavior like numpy.fmod() an 'fmod' Attribute is provided.\n This attribute is set to 0 by default causing the behavior to be like integer mod. \n Setting this attribute to 1 causes the remainder to be calculated similar to that of numpy.fmod().\n\n If the input type is floating point, then `fmod` attribute must be set to 1.\n \n In case of dividend being zero, the results will be platform dependent.\n\n This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.arange(0, 30).reshape([3, 2, 5])\ny = np.array([7])\nz = np.mod(x, y)\nz\n# array([[[0, 1, 2, 3, 4],\n# [5, 6, 0, 1, 2]],\n\n# [[3, 4, 5, 6, 0],\n# [1, 2, 3, 4, 5]],\n\n# [[6, 0, 1, 2, 3],\n# [4, 5, 6, 0, 1]]], dtype=int32)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_broadcast')", + "summary": "mod_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n fmod=1\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)\nz = np.fmod(x, y) # expected output [ 0, 1, 5, 0, -1, 3]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_int64_fmod')", + "summary": "mod_int64_fmod" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n fmod=1\n)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float16)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float16)\nz = np.fmod(x, y) # expected output [-0.10156, 0.3984 , 5. 
, 0.10156, -0.3984 , 3.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_mixed_sign_float16')", + "summary": "mod_mixed_sign_float16" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n fmod=1\n)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float32)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float32)\nz = np.fmod(x, y) # expected output [-0.10000038, 0.39999962, 5. , 0.10000038, -0.39999962, 3.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_mixed_sign_float32')", + "summary": "mod_mixed_sign_float32" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n fmod=1\n)\n\nx = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float64)\ny = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float64)\nz = np.fmod(x, y) # expected output [-0.1, 0.4, 5. , 0.1, -0.4, 3.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_mixed_sign_float64')", + "summary": "mod_mixed_sign_float64" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int16)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int16)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_mixed_sign_int16')", + "summary": "mod_mixed_sign_int16" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int32)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int32)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_mixed_sign_int32')", + "summary": "mod_mixed_sign_int32" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)\ny = np.array([2, -3, 8, -2, 3, 
5]).astype(np.int64)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_mixed_sign_int64')", + "summary": "mod_mixed_sign_int64" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int8)\ny = np.array([2, -3, 8, -2, 3, 5]).astype(np.int8)\nz = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_mixed_sign_int8')", + "summary": "mod_mixed_sign_int8" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint16)\ny = np.array([2, 3, 8]).astype(np.uint16)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_uint16')", + "summary": "mod_uint16" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint32)\ny = np.array([2, 3, 8]).astype(np.uint32)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_uint32')", + "summary": "mod_uint32" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint64)\ny = np.array([2, 3, 8]).astype(np.uint64)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_uint64')", + "summary": "mod_uint64" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mod',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([4, 7, 5]).astype(np.uint8)\ny = np.array([2, 3, 8]).astype(np.uint8)\nz = np.mod(x, y) # expected output [0, 1, 5]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mod_uint8')", + "summary": "mod_uint8" + } + ], + "inputs": [ + { + "description": "Dividend tensor", + "name": "A", + "type": "T" + }, + { + 
"description": "Divisor tensor", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Remainder tensor", + "name": "C", + "type": "T" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Momentum", + "schema": { + "attributes": [ + { + "description": "The decay factor of momentum. It should be a scalar.", + "name": "alpha", + "required": true, + "type": "float32" + }, + { + "description": "The coefficient of gradient in computing new momentum. It should be a scalar.", + "name": "beta", + "required": true, + "type": "float32" + }, + { + "description": "Its value should be either \"nesterov\" or \"standard\". The value \"nesterov\" leads to the use of Nesterov's momentum while \"standard\" invokes stochastic gradient method using standard momentum", + "name": "mode", + "required": true, + "type": "string" + }, + { + "description": "Coefficient of 0.5 * norm_coefficient * ||X||^2.", + "name": "norm_coefficient", + "required": true, + "type": "float32" + } + ], + "description": "Compute one iteration of stochastic gradient update with momentum.\n This operator can conduct the optimization of multiple tensor variables.\n\n Let's define the behavior of this operator. As you can imagine, SG with momentum requires\n several parameters:\n \n - The learning-rate \"R\".\n - The update count \"T\". That is, the number of conducted training iterations. 
It should\n be zero in the first training iteration.\n - A L2-norm regularization coefficient \"norm_coefficient\".\n - A decay coefficient of previous accumulated gradient (i.e., momentum) \"alpha\".\n - The scaling coefficient of current gradient \"beta\".\n - An attribute to choose either standard momentum or Nesterov's momentum \"mode\" should\n be used.\n\n For the sake of simplicity, assume that there is only one tensor (called \"X\") to be optimized.\n Other necessary inputs are \"X\"'s gradient (called \"G\") and \"X\"'s momentum (called \"V\"). This\n Momentum operator maps all these inputs to the new value of \"X\" (called \"X_new\") and its new\n momentum (called \"V_new\").\n \n This operator supports two different momentum algorithms. Set the attribute \"mode\" to\n \"nesterov\" if Nesterov's momentum is desired. Otherwise, set the attribute \"model\" to\n \"standard\" to use standard momentum. Computation details are described subsequently.\n\n Let \"+\", \"-\", \"*\", and \"/\" are all element-wise operations with numpy-style broadcasting.\n\n Pseudo code for SG with standard momentum:\n\n // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared\n // values of all elements in X.\n G_regularized = norm_coefficient * X + G\n\n // In the first training iteration, beta should always be 1.\n beta_adjusted = T > 0 ? beta : 1\n\n // Compute the current momentum based on previous momentum and the current gradient.\n V_new = alpha * V + beta_adjusted * G_regularized\n\n // Update X.\n X_new = X - R * V_new\n\n Pseudo code for SG with Nesterov's momentum:\n\n // Add gradient of 0.5 * norm_coefficient * ||X||^2, where ||X|| is the sum of squared\n // values of all elements in X.\n G_regularized = norm_coefficient * X + G;\n\n // In the first training iteration, beta should always be 1.\n beta_adjusted = T > 0 ? 
beta : 1\n\n // Compute the current momentum based on previous momentum and the current gradient.\n V_new = alpha * V + beta_adjusted * G_regularized;\n\n // Compute final update direction and then update X.\n X_new = X - R * (G_regularized + alpha * V_new)\n\n If one assign this operators to optimize multiple inputs, for example, \"X_1\" and \"X_2\". The same\n pseudo code would be extended to handle all tensors jointly. More specifically, we can view \"X\" as a\n concatenation of \"X_1\" and \"X_2\" (of course, their gradient and accumulate gradient should\n be concatenated too) and then our pseudo code becomes applicable.\n", + "domain": "ai.onnx.preview.training", + "examples": [ + { + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.1\n\n# Create operator.\nnode = onnx.helper.make_node('Momentum',\n inputs=['R', 'T', 'X', 'G', 'V'],\n outputs=['X_new', 'V_new'],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n mode='standard',\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN\n )\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.2, 2.8], dtype=np.float32)\ng = np.array([-0.94, -2.5], dtype=np.float32)\nv = np.array([1.7, 3.6], dtype=np.float32)\n\n# Compute expected outputs of Momentum.\nx_new, v_new = apply_momentum(r, t, x, g, v,\n norm_coefficient, alpha, beta)\n\n# Check results.\nexpect(node, inputs=[r, t, x, g, v],\n outputs=[x_new, v_new], name='test_momentum',\n opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])", + "summary": "momentum" + }, + { + "code": "# Define operator attributes.\nnorm_coefficient = 0.001\nalpha = 0.95\nbeta = 0.85\n\nnode = onnx.helper.make_node('Momentum',\n inputs=['R', 'T', 'X1', 'X2',\n 'G1', 'G2', 'H1', 'H2'],\n outputs=['X1_new', 'X2_new',\n 'V1_new', 'V2_new'],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n mode='standard',\n 
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN\n )\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\n\nx1 = np.array([1.0], dtype=np.float32)\ng1 = np.array([-1.0], dtype=np.float32)\nv1 = np.array([2.0], dtype=np.float32)\n\nx2 = np.array([1.0, 2.0], dtype=np.float32)\ng2 = np.array([-1.0, -3.0], dtype=np.float32)\nv2 = np.array([4.0, 1.0], dtype=np.float32)\n\n# Compute expected outputs of Momentum.\nx1_new, v1_new = apply_momentum(r, t, x1, g1, v1,\n norm_coefficient, alpha, beta)\nx2_new, v2_new = apply_momentum(r, t, x2, g2, v2,\n norm_coefficient, alpha, beta)\n\n# Check results.\nexpect(node, inputs=[r, t, x1, x2, g1, g2, v1, v2],\n outputs=[x1_new, x2_new, v1_new, v2_new], name='test_momentum_multiple',\n opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])", + "summary": "momentum_multiple" + }, + { + "code": "# Define operator attributes.\nnorm_coefficient = 0.01\nalpha = 0.95\nbeta = 1.0\n\n# Create operator.\nnode = onnx.helper.make_node('Momentum',\n inputs=['R', 'T', 'X', 'G', 'V'],\n outputs=['X_new', 'V_new'],\n norm_coefficient=norm_coefficient,\n alpha=alpha,\n beta=beta,\n mode='nesterov',\n domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN\n )\n\n# Define operator inputs.\nr = np.array(0.1, dtype=np.float32) # scalar\nt = np.array(0, dtype=np.int64) # scalar\nx = np.array([1.2, 2.8], dtype=np.float32)\ng = np.array([-0.94, -2.5], dtype=np.float32)\nv = np.array([1.7, 3.6], dtype=np.float32)\n\n# Compute expected outputs of Momentum.\nx_new, v_new = apply_nesterov(r, t, x, g, v,\n norm_coefficient, alpha, beta)\n\n# Check results.\nexpect(node, inputs=[r, t, x, g, v],\n outputs=[x_new, v_new], name='test_nesterov_momentum',\n opset_imports=[onnx.helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])", + "summary": "nesterov_momentum" + } + ], + "inputs": [ + { + "description": "The learning rate.", + "name": "R", + "type": "T1" + }, + { + "description": "Update count of 
\"X\". It should be a scalar.", + "name": "T", + "type": "T2" + }, + { + "description": "It sequentially contains the current values of optimized tensors, then their gradient tensors, and finally their momentum tensors. For example, if two tensors \"X_1\" and \"X_2\" are optimized, The expected input list would be [\"X_1\", \"X_2\", gradient of \"X_1\", gradient of \"X_2\", momentum of \"X_1\", momentum of \"X_2\"].", + "name": "inputs", + "option": "variadic", + "type": "T3" + } + ], + "inputs_range": "3 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "It sequentially contains the new values of optimized tensors and then the new values of their momentum tensors. For example, if two tensors \"X_1\" and \"X_2\" are optimized, the output list would be [new value of \"X_1,\" new value of \"X_2\" new momentum of \"X_1\", new momentum of \"X_2\"].", + "name": "outputs", + "option": "variadic", + "type": "T3" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float scalars.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain input types to 64-bit integer scalars.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float tensors.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "Mul", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. 
See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Performs element-wise binary multiplication (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. 
B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Mul',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul')", + "summary": "mul" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mul',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul_bcast')", + "summary": "mul_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Mul", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "description": "Performs element-wise binary multiplication (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. 
B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Mul',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul')", + "summary": "mul" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mul',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul_bcast')", + "summary": "mul_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Mul", + "schema": { + "description": "Performs element-wise binary multiplication (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Mul',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = x * y # expected output [4., 10., 18.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul')", + "summary": "mul" + }, + { + "code": "node = onnx.helper.make_node(\n 'Mul',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x * y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_mul_bcast')", + "summary": "mul_broadcast" + } + ], + "inputs": [ + { + "description": "First operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand.", + "name": "B", + "type": "T" + } + 
], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same element type as two inputs", + "name": "C", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Multinomial", + "schema": { + "attributes": [ + { + "default": 6, + "description": "(Optional) The data type for the elements of the output tensor, if not specified, we will use int32.", + "name": "dtype", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Number of times to sample.", + "name": "sample_size", + "required": false, + "type": "int64" + }, + { + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one.", + "name": "seed", + "required": false, + "type": "float32" + } + ], + "description": "Generate a tensor of samples from a multinomial distribution according to the probabilities\nof each of the possible outcomes.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input tensor with shape [batch_size, class_size], where class_size is the number of all possible outcomes. Each value along the axis zero represents the unnormalized log-probability of each corresponding outcome in a batch.", + "name": "input", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor with shape [batch_size, sample_size], where sample_size is the number of times to sample. 
Each value along the axis zero represents the outcome of the corresponding sample in a batch.", + "name": "output", + "type": "T2" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float tensors.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain output types to integral tensors.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Neg", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Neg takes one input data (Tensor) and produces one output data\n(Tensor) where each element flipped sign, y = -x, is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Neg',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.negative(x) # expected output [4., -2.],\nexpect(node, inputs=[x], outputs=[y],\n name='test_neg_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.negative(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_neg')", + "summary": "neg" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Neg", + "schema": { + "description": "Neg takes one input data 
(Tensor) and produces one output data\n(Tensor) where each element flipped sign, y = -x, is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Neg',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.negative(x) # expected output [4., -2.],\nexpect(node, inputs=[x], outputs=[y],\n name='test_neg_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.negative(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_neg')", + "summary": "neg" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(int32)", + "tensor(int8)", + "tensor(int16)", + "tensor(int64)", + "tensor(float16)", + "tensor(double)" + ], + "description": "Constrain input and output types to signed numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "NegativeLogLikelihoodLoss", + "schema": { + "attributes": [ + { + "description": "Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value.", + "name": "ignore_index", + "required": false, + "type": "int64" + }, + { + "default": "mean", + "description": "Type of reduction to apply to loss: none, sum, mean (default). 'none': the output is the loss for each sample. 'sum': the output will be summed. 
'mean': the sum of the output will be divided by the sum of applied weights.", + "name": "reduction", + "required": false, + "type": "string" + } + ], + "description": "A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss.\nIts \"input\" tensor has the shape of (N, C, d1, d2, ..., dk) where k >= 0.\nThe \"input\" tensor contains log-probabilities for input[n, :, d_1, d_2,..., d_k] being in a class of [0, C).\nThe operator's \"target\" input tensor has the shape of (N, d1, d2, ..., dk). It encodes class labels (one of C classes)\nor it may contain a special value (indicated by an attribute ignore_index) for N x d1 x d2 x ... x dk samples.\nThe loss value for input[n, :, d_1, d_2,...d_k] being classified as class c = target[n][d_1][d_2]...[d_k] is computed as:\n\n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k].\n\nWhen an optional \"weight\" is provided, the sample loss is calculated as:\n\n loss[n][d_1][d_2]...[d_k] = -input[n][c][d_1][d_2]...[d_k] * weight[c].\n\nloss is zero for the case when target-value equals ignore_index.\n \n loss[n][d_1][d_2]...[d_k] = 0, when target[n][d_1][d_2]...[d_k] = ignore_index\n\nIf \"reduction\" attribute is set to \"none\", the operator's output will be the above loss with shape (N, d1, d2, ..., dk).\nIf \"reduction\" attribute is set to \"mean\" (the default attribute value), the output loss is (weight) averaged:\n\n mean(loss), if \"weight\" is not provided,\n\nor if weight is provided,\n\n sum(loss) / sum(weight[target[n][d_1][d_2]...[d_k]]]), for all samples.\n\nIf \"reduction\" attribute is set to \"sum\", the output is a scalar:\n sum(loss).\n\nSee also https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss.\n\nExample 1:\n\n // negative log likelihood loss, \"none\" reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n\n loss = np.zeros((N, d1))\n for n in range(N):\n for d_1 in 
range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1]\n\n // print(loss)\n // [[-3. -2.]\n // [-0. -2.]]\n\nExample 2:\n\n // weighted negative log likelihood loss, sum reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N, d1))\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n\n loss = np.sum(loss)\n // print(loss)\n // -1.1\n\nExample 3:\n\n // weighted negative log likelihood loss, mean reduction\n N, C, d1 = 2, 3, 2\n input = [[[1.0, 2.0], [2.0, 2.0], [3.0, 2.0]],\n [[0.0, 1.0], [2.0, 2.0], [1.0, 2]]]\n target = [[2, 1], [0, 2]]\n weight = [0.2, 0.3, 0.1]\n loss = np.zeros((N, d1))\n weight_total = 0\n for n in range(N):\n for d_1 in range(d1):\n c = target[n][d_1]\n loss[n][d_1] = -input[n][c][d_1] * weight[c]\n weight_total = weight_total + weight[c]\n\n loss = np.sum(loss) / weight_total\n // print(loss)\n // -1.57\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "reduction = 'none'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C = 3, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, ))\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=None, reduction=reduction)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NC')", + "summary": "input_shape_is_NC" + }, + { + "code": "reduction = 'mean'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, 
high=C, size=(N, d1))\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=None, reduction=reduction)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1')", + "summary": "input_shape_is_NCd1" + }, + { + "code": "reduction = 'mean'\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction,\n ignore_index=ignore_index\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1))\ntarget[0][0] = np.int64(1)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=None, reduction=reduction, ignore_index=ignore_index)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1_ignore_index')", + "summary": "input_shape_is_NCd1_ignore_index" + }, + { + "code": "reduction = 'mean'\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1))\ntarget[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input,\n target,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1_mean_weight_negative_ignore_index')", + "summary": "input_shape_is_NCd1_mean_weight_negative_ignore_index" + }, + { + "code": "reduction = 
'mean'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1))\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=weight, reduction=reduction)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1_weight')", + "summary": "input_shape_is_NCd1_weight" + }, + { + "code": "reduction = 'mean'\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction,\n ignore_index=ignore_index\n)\n\nN, C, d1 = 3, 5, 2\nnp.random.seed(0)\ninput = np.random.rand(N, C, d1).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, d1))\ntarget[0][0] = np.int64(1)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=weight, reduction=reduction, ignore_index=ignore_index)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_iinput_shape_is_NCd1_weight_ignore_index')", + "summary": "input_shape_is_NCd1_weight_ignore_index" + }, + { + "code": "reduction = 'none'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=None, 
reduction=reduction)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2')", + "summary": "input_shape_is_NCd1d2" + }, + { + "code": "reduction = 'mean'\nignore_index = np.int64(1)\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction,\n ignore_index=ignore_index\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\ntarget[0][0][0] = np.int64(1)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, reduction=reduction, ignore_index=ignore_index)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2_no_weight_reduction_mean_ignore_index')", + "summary": "input_shape_is_NCd1d2_no_weight_reduction_mean_ignore_index" + }, + { + "code": "reduction = 'mean'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=None, reduction=reduction)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_mean')", + "summary": "input_shape_is_NCd1d2_reduction_mean" + }, + { + "code": "reduction = 'sum'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, 
dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=None, reduction=reduction)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_sum')", + "summary": "input_shape_is_NCd1d2_reduction_sum" + }, + { + "code": "reduction = 'none'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=weight, reduction=reduction)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight')", + "summary": "input_shape_is_NCd1d2_with_weight" + }, + { + "code": "reduction = 'mean'\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=weight, reduction=reduction)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_mean')", + "summary": "input_shape_is_NCd1d2_with_weight_reduction_mean" + }, + { + "code": "reduction = 'sum'\nnode = 
onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=weight, reduction=reduction)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum')", + "summary": "input_shape_is_NCd1d2_with_weight_reduction_sum" + }, + { + "code": "reduction = 'sum'\nignore_index = np.int64(0)\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction,\n ignore_index=ignore_index\n)\n\nN, C, dim1, dim2 = 3, 5, 6, 6\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2))\ntarget[0][0][0] = np.int64(0)\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input, target, weight=weight, reduction=reduction, ignore_index=ignore_index)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum_ignore_index')", + "summary": "input_shape_is_NCd1d2_with_weight_reduction_sum_ignore_index" + }, + { + "code": "reduction = 'none'\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, 
dim3).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3))\ntarget[0][0][0][0] = -5\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input,\n target,\n reduction=reduction,\n ignore_index=ignore_index)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index')", + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index" + }, + { + "code": "reduction = 'sum'\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C = 3, 5\nnp.random.seed(0)\ninput = np.random.rand(N, C).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N))\ntarget[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input,\n target,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index)\n\nexpect(node, inputs=[input, target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index')", + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ignore_index" + }, + { + "code": "reduction = 'mean'\n\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target', 'weight'],\n outputs=['loss'],\n reduction=reduction)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5))\nweight = np.random.rand(C).astype(np.float32)\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input,\n target,\n weight=weight,\n reduction=reduction)\n\nexpect(node, inputs=[input, 
target, weight], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3d4d5_mean_weight')", + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight" + }, + { + "code": "reduction = 'none'\n\nnode = onnx.helper.make_node(\n 'NegativeLogLikelihoodLoss',\n inputs=['input', 'target'],\n outputs=['loss'],\n reduction=reduction)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\ninput = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\ntarget = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5))\n\nnegative_log_likelihood_loss = compute_negative_log_likelihood_loss(input,\n target,\n reduction=reduction)\n\nexpect(node, inputs=[input, target], outputs=[negative_log_likelihood_loss],\n name='test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3d4d5_none_no_weight')", + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight" + } + ], + "inputs": [ + { + "description": "Input tensor of shape (N, C) or (N, C, d1, d2, ..., dk).", + "name": "input", + "type": "T" + }, + { + "description": "Target tensor of shape (N) or (N, d1, d2, ..., dk). Target element value shall be in range of [0, C). If ignore_index is specified, it may have a value outside [0, C) and the target values should either be in the range [0, C) or have the value ignore_index.", + "name": "target", + "type": "Tind" + }, + { + "description": "Optional rescaling weight tensor. If given, it has to be a tensor of size C. 
Otherwise, it is treated as if having all ones.", + "name": "weight", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "The negative log likelihood loss", + "name": "loss", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input, weight, and output types to floating-point tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain target to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "NonMaxSuppression", + "schema": { + "attributes": [ + { + "description": "Integer indicate the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models.", + "name": "center_point_box", + "required": false, + "type": "int64" + } + ], + "description": "Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes.\nBounding boxes with score less than score_threshold are removed. 
Bounding box format is indicated by attribute center_point_box.\nNote that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to\northogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system\nresult in the same boxes being selected by the algorithm.\nThe selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes.\nThe bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices'],\n center_point_box=1\n)\nboxes = np.array([[\n [0.5, 0.5, 1.0, 1.0],\n [0.5, 0.6, 1.0, 1.0],\n [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0],\n [0.5, 10.6, 1.0, 1.0],\n [0.5, 100.5, 1.0, 1.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_center_point_box_format')", + "summary": "nonmaxsuppression_center_point_box_format" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [1.0, 1.0, 0.0, 0.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, 0.9, 1.0, -0.1],\n [0.0, 10.0, 1.0, 11.0],\n [1.0, 10.1, 0.0, 
11.1],\n [1.0, 101.0, 0.0, 100.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_flipped_coordinates')", + "summary": "nonmaxsuppression_flipped_coordinates" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_identical_boxes')", + "summary": "nonmaxsuppression_identical_boxes" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 
11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_limit_output_size')", + "summary": "nonmaxsuppression_limit_output_size" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_single_box')", + "summary": "nonmaxsuppression_single_box" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = 
np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_suppress_by_IOU')", + "summary": "nonmaxsuppression_suppress_by_IOU" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.4]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_suppress_by_IOU_and_scores')", + "summary": "nonmaxsuppression_suppress_by_IOU_and_scores" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[[0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]],\n [[0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]],\n [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = 
np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_two_batches')", + "summary": "nonmaxsuppression_two_batches" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],\n [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_two_classes')", + "summary": "nonmaxsuppression_two_classes" + } + ], + "inputs": [ + { + "description": "An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.", + "name": "boxes", + "type": "tensor(float)" + }, + { + "description": "An input tensor with shape [num_batches, num_classes, spatial_dimension]", + "name": "scores", + "type": "tensor(float)" + }, + { + "description": "Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. 
Default to 0, which means no output.", + "name": "max_output_boxes_per_class", + "option": "optional", + "type": "tensor(int64)" + }, + { + "description": "Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is scalar. Value range [0, 1]. Default to 0.", + "name": "iou_threshold", + "option": "optional", + "type": "tensor(float)" + }, + { + "description": "Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.", + "name": "score_threshold", + "option": "optional", + "type": "tensor(float)" + } + ], + "inputs_range": "2 - 5", + "max_input": 5, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].", + "name": "selected_indices", + "type": "tensor(int64)" + } + ], + "since_version": 10, + "support_level": "common" + } + }, + { + "name": "NonMaxSuppression", + "schema": { + "attributes": [ + { + "description": "Integer indicate the format of the box data. The default is 0. 0 - the box data is supplied as [y1, x1, y2, x2] where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Mostly used for TF models. 1 - the box data is supplied as [x_center, y_center, width, height]. Mostly used for Pytorch models.", + "name": "center_point_box", + "required": false, + "type": "int64" + } + ], + "description": "Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes.\nBounding boxes with score less than score_threshold are removed. 
Bounding box format is indicated by attribute center_point_box.\nNote that this algorithm is agnostic to where the origin is in the coordinate system and more generally is invariant to\northogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system\nresult in the same boxes being selected by the algorithm.\nThe selected_indices output is a set of integers indexing into the input collection of bounding boxes representing the selected boxes.\nThe bounding box coordinates corresponding to the selected indices can then be obtained using the Gather or GatherND operation.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices'],\n center_point_box=1\n)\nboxes = np.array([[\n [0.5, 0.5, 1.0, 1.0],\n [0.5, 0.6, 1.0, 1.0],\n [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0],\n [0.5, 10.6, 1.0, 1.0],\n [0.5, 100.5, 1.0, 1.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_center_point_box_format')", + "summary": "nonmaxsuppression_center_point_box_format" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [1.0, 1.0, 0.0, 0.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, 0.9, 1.0, -0.1],\n [0.0, 10.0, 1.0, 11.0],\n [1.0, 10.1, 0.0, 
11.1],\n [1.0, 101.0, 0.0, 100.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_flipped_coordinates')", + "summary": "nonmaxsuppression_flipped_coordinates" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_identical_boxes')", + "summary": "nonmaxsuppression_identical_boxes" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 
11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_limit_output_size')", + "summary": "nonmaxsuppression_limit_output_size" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_single_box')", + "summary": "nonmaxsuppression_single_box" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = 
np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_suppress_by_IOU')", + "summary": "nonmaxsuppression_suppress_by_IOU" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([3]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.4]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_suppress_by_IOU_and_scores')", + "summary": "nonmaxsuppression_suppress_by_IOU_and_scores" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[[0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]],\n [[0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]],\n [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = 
np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_two_batches')", + "summary": "nonmaxsuppression_two_batches" + }, + { + "code": "node = onnx.helper.make_node(\n 'NonMaxSuppression',\n inputs=['boxes', 'scores', 'max_output_boxes_per_class', 'iou_threshold', 'score_threshold'],\n outputs=['selected_indices']\n)\nboxes = np.array([[\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0]\n]]).astype(np.float32)\nscores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],\n [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\nmax_output_boxes_per_class = np.array([2]).astype(np.int64)\niou_threshold = np.array([0.5]).astype(np.float32)\nscore_threshold = np.array([0.0]).astype(np.float32)\nselected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]).astype(np.int64)\n\nexpect(node, inputs=[boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold], outputs=[selected_indices], name='test_nonmaxsuppression_two_classes')", + "summary": "nonmaxsuppression_two_classes" + } + ], + "inputs": [ + { + "description": "An input tensor with shape [num_batches, spatial_dimension, 4]. The single box data format is indicated by center_point_box.", + "name": "boxes", + "type": "tensor(float)" + }, + { + "description": "An input tensor with shape [num_batches, num_classes, spatial_dimension]", + "name": "scores", + "type": "tensor(float)" + }, + { + "description": "Integer representing the maximum number of boxes to be selected per batch per class. It is a scalar. 
Default to 0, which means no output.", + "name": "max_output_boxes_per_class", + "option": "optional", + "type": "tensor(int64)" + }, + { + "description": "Float representing the threshold for deciding whether boxes overlap too much with respect to IOU. It is scalar. Value range [0, 1]. Default to 0.", + "name": "iou_threshold", + "option": "optional", + "type": "tensor(float)" + }, + { + "description": "Float representing the threshold for deciding when to remove boxes based on score. It is a scalar.", + "name": "score_threshold", + "option": "optional", + "type": "tensor(float)" + } + ], + "inputs_range": "2 - 5", + "max_input": 5, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "selected indices from the boxes tensor. [num_selected_indices, 3], the selected index format is [batch_index, class_index, box_index].", + "name": "selected_indices", + "type": "tensor(int64)" + } + ], + "since_version": 11, + "support_level": "common" + } + }, + { + "name": "NonZero", + "schema": { + "description": "Returns the indices of the elements that are non-zero\n (in row-major order - by dimension).\n NonZero behaves similar to numpy.nonzero:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.nonzero.html\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'NonZero',\n inputs=['condition'],\n outputs=['result'],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=np.bool)\nresult = np.array((np.nonzero(condition))) # expected output [[0, 1, 1], [0, 0, 1]]\nexpect(node, inputs=[condition], outputs=[result],\n name='test_nonzero_example')", + "summary": "nonzero" + } + ], + "inputs": [ + { + "description": "input", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "output", + "name": "Y", + "type": "tensor(int64)" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": 
[ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Normalizer", + "schema": { + "attributes": [ + { + "default": "MAX", + "description": "One of 'MAX,' 'L1,' 'L2'", + "name": "norm", + "required": false, + "type": "string" + } + ], + "description": "Normalize the input. There are three normalization modes, which have the corresponding formulas,\n defined using element-wise infix operators '/' and '^' and tensor-wide functions 'max' and 'sum':
    \n
    \n Max: Y = X / max(X)
    \n L1: Y = X / sum(X)
    \n L2: Y = sqrt(X^2 / sum(X^2))
    \n In all modes, if the divisor is zero, Y == X.\n
    \n For batches, that is, [N,C] tensors, normalization is done along the C axis. In other words, each row\n of the batch is normalized independently.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be encoded, a tensor of shape [N,C] or [C]", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Encoded output data", + "name": "Y", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Not", + "schema": { + "category": "Logic", + "description": "Returns the negation of the input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Not',\n inputs=['x'],\n outputs=['not'],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(np.bool)\nexpect(node, inputs=[x], outputs=[np.logical_not(x)],\n name='test_not_2d')\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\nexpect(node, inputs=[x], outputs=[np.logical_not(x)],\n name='test_not_3d')\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\nexpect(node, inputs=[x], outputs=[np.logical_not(x)],\n name='test_not_4d')", + "summary": "not" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains input/output to boolean tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": 
"OneHot", + "schema": { + "attributes": [ + { + "default": -1, + "description": "(Optional) Axis along which one-hot representation in added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor.", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "Produces a one-hot tensor based on inputs.\n The locations represented by the index values in the 'indices' input tensor will have 'on_value'\n and the other locations will have 'off_value' in the output tensor, where 'on_value' and 'off_value'\n are specified as part of required input argument 'values', which is a two-element tensor of format\n [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the\n input tensor. The additional dimension is for one-hot representation. The additional dimension will\n be inserted at the position specified by 'axis'. If 'axis' is not specified then then additional\n dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional\n dimension is specified by required scalar input 'depth'. The type of the output tensor is the same\n as the type of the 'values' input. 
Any entries in the 'indices' input tensor with values outside\n the range [0, depth) will result in one-hot representation with all 'off_value' values in the\n output tensor.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y'],\n axis=axisValue\n)\nindices = np.array([[1, 9],\n [2, 4]], dtype=np.float32)\ndepth = np.array([10], dtype=np.float32)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_with_axis')", + "summary": "with_axis" + }, + { + "code": "axisValue = -2\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y'],\n axis=axisValue\n)\nindices = np.array([[1, 9],\n [2, 4]], dtype=np.float32)\ndepth = np.array([10], dtype=np.float32)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_with_negative_axis')", + "summary": "with_negative_axis" + }, + { + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y'],\n axis=axisValue\n)\nindices = np.array([0, -7, -8], dtype=np.int64)\n\ndepth = np.array([10], dtype=np.float32)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_negative_indices')", + 
"summary": "with_negative_indices" + }, + { + "code": "on_value = 5\noff_value = 2\noutput_type = np.int32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y']\n)\nindices = np.array([0, 7, 8], dtype=np.int64)\ndepth = np.float32(12)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_without_axis')", + "summary": "without_axis" + } + ], + "inputs": [ + { + "description": "Input tensor containing indices. The values must be non-negative integers. Any entries in the 'indices' input tensor with values outside the range [0, depth) will result in one-hot representation with all 'off_value' values in the output tensor.In case 'indices' is of non-integer type, the values will be casted to int64 before use.", + "name": "indices", + "type": "T1" + }, + { + "description": "Scalar specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by 'axis' attribute) added on in the output tensor. The values in the 'indices' input tensor are expected to be in the range [0, depth). In case 'depth' is of non-integer type, it will be casted to int64 before use.", + "name": "depth", + "type": "T2" + }, + { + "description": "Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where 'on_value' is the value used for filling locations specified in 'indices' input tensor, and 'off_value' is the value used for filling locations other than those specified in 'indices' input tensor. ", + "name": "values", + "type": "T3" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank one greater than input tensor 'indices', i.e. rank(output) = rank(indices) + 1. 
The data type for the elements of the output tensor is the same as the type of input 'values' is used.", + "name": "output", + "type": "T3" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to only numeric types.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to only numeric types.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "OneHot", + "schema": { + "attributes": [ + { + "default": -1, + "description": "(Optional) Axis along which one-hot representation in added. Default: axis=-1. axis=-1 means that the additional dimension will be inserted as the innermost/last dimension in the output tensor. Negative value means counting dimensions from the back. 
Accepted range is [-r-1, r] where r = rank(indices).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "Produces a one-hot tensor based on inputs.\n The locations represented by the index values in the 'indices' input tensor will have 'on_value'\n and the other locations will have 'off_value' in the output tensor, where 'on_value' and 'off_value'\n are specified as part of required input argument 'values', which is a two-element tensor of format\n [off_value, on_value]. The rank of the output tensor will be one greater than the rank of the\n input tensor. The additional dimension is for one-hot representation. The additional dimension will\n be inserted at the position specified by 'axis'. If 'axis' is not specified then then additional\n dimension will be inserted as the innermost dimension, i.e. axis=-1. The size of the additional\n dimension is specified by required scalar input 'depth'. The type of the output tensor is the same\n as the type of the 'values' input. 
Any entries in the 'indices' input tensor with values outside\n the range [-depth, depth-1] will result in one-hot representation with all 'off_value' values in the\n output tensor.\n\n when axis = 0:\n output[input[i, j, k], i, j, k] = 1 for all i, j, k and 0 otherwise.\n\n when axis = -1:\n output[i, j, k, input[i, j, k]] = 1 for all i, j, k and 0 otherwise.\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y'],\n axis=axisValue\n)\nindices = np.array([[1, 9],\n [2, 4]], dtype=np.float32)\ndepth = np.array([10], dtype=np.float32)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_with_axis')", + "summary": "with_axis" + }, + { + "code": "axisValue = -2\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y'],\n axis=axisValue\n)\nindices = np.array([[1, 9],\n [2, 4]], dtype=np.float32)\ndepth = np.array([10], dtype=np.float32)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_with_negative_axis')", + "summary": "with_negative_axis" + }, + { + "code": "axisValue = 1\non_value = 3\noff_value = 1\noutput_type = np.float32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y'],\n axis=axisValue\n)\nindices = np.array([0, -7, -8], dtype=np.int64)\n\ndepth = np.array([10], dtype=np.float32)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, 
depth, axis=axisValue, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_negative_indices')", + "summary": "with_negative_indices" + }, + { + "code": "on_value = 5\noff_value = 2\noutput_type = np.int32\nnode = onnx.helper.make_node(\n 'OneHot',\n inputs=['indices', 'depth', 'values'],\n outputs=['y']\n)\nindices = np.array([0, 7, 8], dtype=np.int64)\ndepth = np.float32(12)\nvalues = np.array([off_value, on_value], dtype=output_type)\ny = one_hot(indices, depth, dtype=output_type)\ny = y * (on_value - off_value) + off_value\nexpect(node, inputs=[indices, depth, values], outputs=[y], name='test_onehot_without_axis')", + "summary": "without_axis" + } + ], + "inputs": [ + { + "description": "Input tensor containing indices. Any entries in the 'indices' input tensor with values outside the range [-depth, depth-1] will result in one-hot representation with all 'off_value' values in the output tensor.In case 'indices' is of non-integer type, the values will be casted to int64 before use.", + "name": "indices", + "type": "T1" + }, + { + "description": "Scalar specifying the number of classes in one-hot tensor. This is also the size of the one-hot dimension (specified by 'axis' attribute) added on in the output tensor. The values in the 'indices' input tensor are expected to be in the range [-depth, depth-1]. In case 'depth' is of non-integer type, it will be casted to int64 before use.", + "name": "depth", + "type": "T2" + }, + { + "description": "Rank 1 tensor containing exactly two elements, in the format [off_value, on_value], where 'on_value' is the value used for filling locations specified in 'indices' input tensor, and 'off_value' is the value used for filling locations other than those specified in 'indices' input tensor. 
", + "name": "values", + "type": "T3" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank one greater than input tensor 'indices', i.e. rank(output) = rank(indices) + 1. The data type for the elements of the output tensor is the same as the type of input 'values' is used.", + "name": "output", + "type": "T3" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to only numeric types.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to only numeric types.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "OneHotEncoder", + "schema": { + "attributes": [ + { + "description": "List of categories, ints.
    One and only one of the 'cats_*' attributes must be defined.", + "name": "cats_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "List of categories, strings.
    One and only one of the 'cats_*' attributes must be defined.", + "name": "cats_strings", + "required": false, + "type": "string[]" + }, + { + "default": 1, + "description": "If true and category is not present, will return all zeros; if false and a category if not found, the operator will fail.", + "name": "zeros", + "required": false, + "type": "int64" + } + ], + "description": "Replace each input element with an array of ones and zeros, where a single\n one is placed at the index of the category that was passed in. The total category count \n will determine the size of the extra dimension of the output array Y.
    \n For example, if we pass a tensor with a single value of 4, and a category count of 8, \n the output will be a tensor with ``[0,0,0,0,1,0,0,0]``.
    \n This operator assumes every input feature is from the same set of categories.
    \n If the input is a tensor of float, int32, or double, the data will be cast\n to integers and the cats_int64s category list will be used for the lookups.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be encoded.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Encoded output data, having one more dimension than X.", + "name": "Y", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(int32)", + "tensor(float)", + "tensor(double)" + ], + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Or", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "category": "Logic", + "description": "Returns the tensor resulted from performing the `or` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. 
See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Or',\n inputs=['x', 'y'],\n outputs=['or'],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(np.bool)\ny = (np.random.randn(3, 4) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or2d')\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or3d')\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or4d')", + "summary": "or" + }, + { + "code": "node = onnx.helper.make_node(\n 'Or',\n inputs=['x', 'y'],\n outputs=['or'],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(5) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast3v1d')\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(4, 5) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast3v2d')\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast4v2d')\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast4v3d')\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast4v4d')", + 
"summary": "or_broadcast" + } + ], + "inputs": [ + { + "description": "Left input tensor for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Right input tensor for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains input to boolean tensor.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Or", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `or` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Or',\n inputs=['x', 'y'],\n outputs=['or'],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(np.bool)\ny = (np.random.randn(3, 4) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or2d')\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or3d')\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or4d')", + "summary": "or" + }, + { + "code": "node = 
onnx.helper.make_node(\n 'Or',\n inputs=['x', 'y'],\n outputs=['or'],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(5) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast3v1d')\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(4, 5) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast3v2d')\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast4v2d')\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast4v3d')\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(np.bool)\nz = np.logical_or(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_or_bcast4v4d')", + "summary": "or_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains input to boolean tensor.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "PRelu", + "schema": { + "attributes": [ + { + 
"description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "category": "Activation", + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`., is applied to the data tensor elementwise.\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_example')", + "summary": "prelu" + }, + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_broadcast')", + "summary": "prelu_broadcast" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + }, + { + "description": "Slope tensor. 
If `Slope` is of size 1, the value is sharedacross different channels", + "name": "slope", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "PRelu", + "schema": { + "category": "Activation", + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`., is applied to the data tensor elementwise.\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_example')", + "summary": "prelu" + }, + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_broadcast')", + "summary": "prelu_broadcast" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + }, + { + "description": "Slope tensor. 
If `Slope` is of size 1, the value is sharedacross different channels", + "name": "slope", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "PRelu", + "schema": { + "category": "Activation", + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`., is applied to the data tensor elementwise.\nThis operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_example')", + "summary": "prelu" + }, + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_broadcast')", + "summary": "prelu_broadcast" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + }, + { + "description": "Slope 
tensor. The shape of slope can be smaller then first input X; if so, its shape must be unidirectional broadcastable to X", + "name": "slope", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor (same size as X)", + "name": "Y", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "PRelu", + "schema": { + "category": "Activation", + "description": "PRelu takes input data (Tensor) and slope tensor as input, and produces one\noutput data (Tensor) where the function `f(x) = slope * x for x < 0`,\n`f(x) = x for x >= 0`., is applied to the data tensor elementwise.\nThis operator supports **unidirectional broadcasting** (tensor slope should be unidirectional broadcastable to input tensor X); for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_example')", + "summary": "prelu" + }, + { + "code": "node = onnx.helper.make_node(\n 'PRelu',\n inputs=['x', 'slope'],\n outputs=['y'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\nslope = np.random.randn(5).astype(np.float32)\ny = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope\n\nexpect(node, inputs=[x, slope], outputs=[y],\n name='test_prelu_broadcast')", + "summary": "prelu_broadcast" + } + ], + "inputs": [ + { + "description": "Input tensor", 
+ "name": "X", + "type": "T" + }, + { + "description": "Slope tensor. The shape of slope can be smaller then first input X; if so, its shape must be unidirectional broadcastable to X", + "name": "slope", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor (same size as X)", + "name": "Y", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain input and output types to float/int tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Pad", + "schema": { + "attributes": [ + { + "default": "constant", + "description": "Three modes: constant(default), reflect, edge", + "name": "mode", + "required": false, + "type": "string" + }, + { + "description": "List of integers indicate the padding element count at the beginning and end of each axis, for 2D it is the number of pixel. `paddings` rank should be double of the input's rank. 
`paddings` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`.", + "name": "paddings", + "required": true, + "type": "int64[]" + }, + { + "description": "One float, indicates the value to be filled, default is 0", + "name": "value", + "required": false, + "type": "float32" + } + ], + "category": "Tensor", + "description": "Given `data` tensor, paddings, mode, and value.\nExample:\n Insert 0 paddings to the beginning of the second dimension.\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n paddings = [0, 0, 2, 0]\n output = [\n [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n ],\n ]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Pad',\n inputs=['x', 'pads', 'value'],\n outputs=['y'],\n mode='constant'\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(np.int64) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(\n x,\n pads,\n 'constant',\n 1.2\n)\n\nexpect(node, inputs=[x, pads, value], outputs=[y],\n name='test_constant_pad')", + "summary": "constant_pad" + }, + { + "code": "for mode in ['edge', 'reflect']:\n node = onnx.helper.make_node(\n 'Pad',\n inputs=['x', 'pads'],\n outputs=['y'],\n mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(np.int64) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(\n x,\n pads,\n mode\n )\n\n expect(node, inputs=[x, pads], outputs=[y],\n name='test_{}_pad'.format(mode))", + "summary": "reflection_and_edge_pad" + } + ], + "inputs": [ + { + "description": "Input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + 
"description": "Tensor after padding.", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Pad", + "schema": { + "attributes": [ + { + "default": "constant", + "description": "Three modes: constant(default), reflect, edge", + "name": "mode", + "required": false, + "type": "string" + }, + { + "description": "List of integers indicating the number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D it is the number of pixels. `pads` rank should be double of the input's rank. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`.", + "name": "pads", + "required": true, + "type": "int64[]" + }, + { + "description": "One float, indicates the value to be filled.", + "name": "value", + "required": false, + "type": "float32" + } + ], + "category": "Tensor", + "description": "Given `data` tensor, pads, mode, and value.\nExample:\n Insert 0 pads to the beginning of the second dimension.\n data = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ]\n pads = [0, 2, 0, 0]\n output = [\n [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n ],\n ]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Pad',\n inputs=['x', 'pads', 'value'],\n outputs=['y'],\n mode='constant'\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(np.int64) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(\n x,\n pads,\n 'constant',\n 1.2\n)\n\nexpect(node, inputs=[x, 
pads, value], outputs=[y],\n name='test_constant_pad')", + "summary": "constant_pad" + }, + { + "code": "for mode in ['edge', 'reflect']:\n node = onnx.helper.make_node(\n 'Pad',\n inputs=['x', 'pads'],\n outputs=['y'],\n mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(np.int64) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(\n x,\n pads,\n mode\n )\n\n expect(node, inputs=[x, pads], outputs=[y],\n name='test_{}_pad'.format(mode))", + "summary": "reflection_and_edge_pad" + } + ], + "inputs": [ + { + "description": "Input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Tensor after padding.", + "name": "output", + "type": "T" + } + ], + "since_version": 2, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Pad", + "schema": { + "attributes": [ + { + "default": "constant", + "description": "Supported modes: `constant`(default), `reflect`, `edge`", + "name": "mode", + "required": false, + "type": "string" + } + ], + "category": "Tensor", + "description": "Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`, \na padded tensor (`output`) is generated.\n\nThe three supported `modes` are (similar to corresponding modes supported by `numpy.pad`):\n\n1) `constant`(default) - pads with a given constant value as specified by `constant_value` (which defaults to 0)\n\n2) `reflect` - pads with the reflection of the vector mirrored on the first and last values of the vector along each axis\n\n3) `edge` - pads with the 
edge values of array\n\n\nExample 1 (`constant` mode):\n Insert 0 pads to the beginning of the second dimension.\n\n data = \n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ] \n\n pads = [0, 2, 0, 0]\n\n mode = 'constant'\n\n constant_value = 0.0\n\n output = \n [\n [\n [0.0, 0.0, 1.0, 1.2],\n [0.0, 0.0, 2.3, 3.4],\n [0.0, 0.0, 4.5, 5.7],\n ],\n ]\n\n\nExample 2 (`reflect` mode):\n data = \n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ] \n\n pads = [0, 2, 0, 0]\n\n mode = 'reflect'\n\n output = \n [\n [\n [1.0, 1.2, 1.0, 1.2],\n [2.3, 3.4, 2.3, 3.4],\n [4.5, 5.7, 4.5, 5.7],\n ],\n ]\n\n\nExample 3 (`edge` mode):\n data = \n [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n ] \n\n pads = [0, 2, 0, 0]\n\n mode = 'edge'\n\n output = \n [\n [\n [1.0, 1.0, 1.0, 1.2],\n [2.3, 2.3, 2.3, 3.4],\n [4.5, 4.5, 4.5, 5.7],\n ],\n ]\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Pad',\n inputs=['x', 'pads', 'value'],\n outputs=['y'],\n mode='constant'\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\npads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(np.int64) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\nvalue = np.float32(1.2)\ny = pad_impl(\n x,\n pads,\n 'constant',\n 1.2\n)\n\nexpect(node, inputs=[x, pads, value], outputs=[y],\n name='test_constant_pad')", + "summary": "constant_pad" + }, + { + "code": "for mode in ['edge', 'reflect']:\n node = onnx.helper.make_node(\n 'Pad',\n inputs=['x', 'pads'],\n outputs=['y'],\n mode=mode\n )\n x = np.random.randn(1, 3, 4, 5).astype(np.int32)\n pads = np.array([0, 0, 1, 1, 0, 0, 1, 1]).astype(np.int64) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]\n y = pad_impl(\n x,\n pads,\n mode\n )\n\n expect(node, inputs=[x, pads], outputs=[y],\n name='test_{}_pad'.format(mode))", + "summary": "reflection_and_edge_pad" + } + ], + "inputs": [ + { + "description": "Input tensor.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of integers indicating the 
number of padding elements to add or remove (if negative) at the beginning and end of each axis. For 2D input tensor, it is the number of pixels. `pads` should be a 1D tensor of shape [2 * input_rank]. `pads` format should be: [x1_begin, x2_begin,...,x1_end, x2_end,...], where xi_begin is the number of pad values added at the beginning of axis `i` and xi_end, the number of pad values added at the end of axis `i`.", + "name": "pads", + "type": "tensor(int64)" + }, + { + "description": "(Optional) A scalar value to be used if the mode chosen is `constant` (by default it is 0).", + "name": "constant_value", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor after padding.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input and output to only numeric types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Pow", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis applied to the data tensor elementwise.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. 
When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_example')\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow')", + "summary": "pow" + }, + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_bcast_scalar')\n\nnode = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz 
= pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_bcast_array')", + "summary": "pow_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_int64')\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int64_float32')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_int32')\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int32_float32')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_uint64')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_uint32')\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int64_int64')\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int32_int32')", + "summary": "types" + } + ], + "inputs": [ + { + 
"description": "Input tensor of any shape, base of the exponent.", + "name": "X", + "type": "T" + }, + { + "description": "Input tensor of any shape broadcastable to X shape, the exponent component.", + "name": "Y", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor (same size as X)", + "name": "Z", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Pow", + "schema": { + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis applied to the data tensor elementwise.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_example')\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow')", + "summary": "pow" + }, + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_bcast_scalar')\n\nnode = onnx.helper.make_node(\n 
'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_bcast_array')", + "summary": "pow_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_int64')\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int64_float32')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_int32')\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int32_float32')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_uint64')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_uint32')\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int64_int64')\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 
6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int32_int32')", + "summary": "types" + } + ], + "inputs": [ + { + "description": "First operand, base of the exponent.", + "name": "X", + "type": "T" + }, + { + "description": "Second operand, power of the exponent.", + "name": "Y", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor (same size as X)", + "name": "Z", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Pow", + "schema": { + "description": "Pow takes input data (Tensor) and exponent Tensor, and\nproduces one output data (Tensor) where the function `f(x) = x^exponent`,\nis applied to the data tensor elementwise.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_example')\n\nx = np.arange(60).reshape(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow')", + "summary": "pow" + }, + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array(2).astype(np.float32)\nz = 
pow(x, y) # expected output [1., 4., 9.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_bcast_scalar')\n\nnode = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\nx = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)\ny = np.array([1, 2, 3]).astype(np.float32)\n# expected output [[1, 4, 27], [4, 25, 216]]\nz = pow(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_bcast_array')", + "summary": "pow_broadcast" + }, + { + "code": "node = onnx.helper.make_node(\n 'Pow',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_int64')\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int64_float32')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_int32')\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.float32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int32_float32')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint64)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_uint64')\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([4, 5, 6]).astype(np.uint32)\nz = pow(x, y) # expected output [1., 32., 729.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_float32_uint32')\n\nx = np.array([1, 2, 3]).astype(np.int64)\ny = np.array([4, 5, 6]).astype(np.int64)\nz = pow(x, y) # expected output [1, 32, 
729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int64_int64')\n\nx = np.array([1, 2, 3]).astype(np.int32)\ny = np.array([4, 5, 6]).astype(np.int32)\nz = pow(x, y) # expected output [1, 32, 729]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_pow_types_int32_int32')", + "summary": "types" + } + ], + "inputs": [ + { + "description": "First operand, base of the exponent.", + "name": "X", + "type": "T" + }, + { + "description": "Second operand, power of the exponent.", + "name": "Y", + "type": "T1" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor (same size as X)", + "name": "Z", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input X and output types to float/int tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input Y types to float/int tensors.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "QLinearConv", + "schema": { + "attributes": [ + { + "default": "NOTSET", + "description": "auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that the output spatial size match the input.In case of odd number add the extra padding at the end for SAME_UPPER and at the beginning for SAME_LOWER. VALID mean no padding.", + "name": "auto_pad", + "required": false, + "type": "string" + }, + { + "description": "dilation value along each spatial axis of the filter. 
If not present, the dilation defaults to 1 along each spatial axis.", + "name": "dilations", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "number of groups input channels and output channels are divided into. default is 1.", + "name": "group", + "required": false, + "type": "int64" + }, + { + "description": "The shape of the convolution kernel. If not present, should be inferred from input 'w'.", + "name": "kernel_shape", + "required": false, + "type": "int64[]" + }, + { + "description": "Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0.The value represent the number of pixels added to the beginning and end part of the corresponding axis.`pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number ofpixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`.This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaultsto 0 along start and end of each spatial axis.", + "name": "pads", + "required": false, + "type": "int64[]" + }, + { + "description": "Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis.", + "name": "strides", + "required": false, + "type": "int64[]" + } + ], + "description": "The convolution operator consumes a quantized input tensor, its scale and zero point,\na quantized filter, its scale and zero point, and output's scale and zero point,\nand computes the quantized output. 
Each scale and zero-point pair must have same shape.\nIt means they must be either scalars (per tensor) or 1-D tensors (per output channel).\nEach input or output and its related zero point must have same type.\nWhen bias is present it must be quantized using scale = input scale * weight scale and \nzero point as 0.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node('QLinearConv',\n inputs=['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'],\n outputs=['y'],)\n\nx = np.array([[255, 174, 162, 25, 203, 168, 58],\n [15, 59, 237, 95, 129, 0, 64],\n [56, 242, 153, 221, 168, 12, 166],\n [232, 178, 186, 195, 237, 162, 237],\n [188, 39, 124, 77, 80, 102, 43],\n [127, 230, 21, 83, 41, 40, 134],\n [255, 154, 92, 141, 42, 148, 247], ], dtype=np.uint8).reshape((1, 1, 7, 7))\n\nx_scale = np.float32(0.00369204697)\nx_zero_point = np.uint8(132)\n\nw = np.array([0], dtype=np.uint8).reshape((1, 1, 1, 1))\n\nw_scale = np.array([0.00172794575], dtype=np.float32)\nw_zero_point = np.array([255], dtype=np.uint8)\n\ny_scale = np.float32(0.00162681262)\ny_zero_point = np.uint8(123)\n\noutput = np.array([[0, 81, 93, 230, 52, 87, 197],\n [240, 196, 18, 160, 126, 255, 191],\n [199, 13, 102, 34, 87, 243, 89],\n [23, 77, 69, 60, 18, 93, 18],\n [67, 216, 131, 178, 175, 153, 212],\n [128, 25, 234, 172, 214, 215, 121],\n [0, 101, 163, 114, 213, 107, 8], ], dtype=np.uint8).reshape((1, 1, 7, 7))\n\nexpect(node, inputs=[x, x_scale, x_zero_point, w, w_scale, w_zero_point, y_scale, y_zero_point], outputs=[output],\n name='test_qlinearconv')", + "summary": "qlinearconv" + } + ], + "inputs": [ + { + "description": "Input data tensor from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). 
Optionally, if dimension denotation is in effect, the operation expects input data tensor to arrive with the dimension denotation of [DATA_BATCH, DATA_CHANNEL, DATA_FEATURE, DATA_FEATURE ...].", + "name": "x", + "type": "T1" + }, + { + "description": "Scale tensor for input 'x'. It's a scalar, which means a per-tensor/layer quantization.", + "name": "x_scale", + "type": "tensor(float)" + }, + { + "description": "Zero point tensor for input 'x'. It's a scalar, which means a per-tensor/layer quantization.", + "name": "x_zero_point", + "type": "T1" + }, + { + "description": "The weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. Optionally, if dimension denotation is in effect, the operation expects the weight tensor to arrive with the dimension denotation of [FILTER_OUT_CHANNEL, FILTER_IN_CHANNEL, FILTER_SPATIAL, FILTER_SPATIAL ...]. X.shape[1] == (W.shape[1] * group) == C (assuming zero based indices for the shape array). Or in other words FILTER_IN_CHANNEL should be equal to DATA_CHANNEL. ", + "name": "w", + "type": "T2" + }, + { + "description": "Scale tensor for input 'w'. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. If it's a 1-D tensor, its number of elements should be equal to the number of output channels (M).", + "name": "w_scale", + "type": "tensor(float)" + }, + { + "description": "Zero point tensor for input 'w'. It could be a scalar or a 1-D tensor, which means a per-tensor/layer or per output channel quantization. 
If it's a 1-D tensor, its number of elements should be equal to the number of output channels (M).", + "name": "w_zero_point", + "type": "T2" + }, + { + "description": "Scale tensor for output 'y'. It's a scalar, which means a per-tensor/layer quantization.", + "name": "y_scale", + "type": "tensor(float)" + }, + { + "description": "Zero point tensor for output 'y'. It's a scalar, which means a per-tensor/layer quantization.", + "name": "y_zero_point", + "type": "T3" + }, + { + "description": "Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0", + "name": "B", + "option": "optional", + "type": "T4" + } + ], + "inputs_range": "8 - 9", + "max_input": 9, + "max_output": 1, + "min_input": 8, + "min_output": 1, + "outputs": [ + { + "description": "Output data tensor that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.", + "name": "y", + "type": "T3" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain input type to 8-bit integer tensor.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain filter type to 8-bit integer tensor.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain output type to 8-bit integer tensor.", + "type_param_str": "T3" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain bias type to 32-bit integer tensor.", + "type_param_str": "T4" + } + ] + } + }, + { + "name": "QLinearMatMul", + "schema": { + "description": "Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html.\nIt consumes two quantized input tensors, 
their scales and zero points, scale and zero point of output, and computes the quantized output.\nThe quantization formula is y = saturate((x / y_scale) + y_zero_point). For (x / y_scale), it is rounding to nearest ties to even.\nRefer to https://en.wikipedia.org/wiki/Rounding for details. Scale and zero point must have same shape.\nThey must be either scalar (per tensor) or 1-D tensor (per row for 'a' and per column for 'b'). If scale and zero point are 1-D tensor,\nthe number of elements of scale and zero point tensor of input 'a' and output 'y' should be equal to the number of rows of input 'a',\nand the number of elements of scale and zero point tensor of input 'b' should be equal to the number of columns of input 'b'.\nProduction must never overflow, and accumulation may overflow if and only if in 32 bits.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node('QLinearMatMul',\n inputs=['a', 'a_scale', 'a_zero_point', 'b', 'b_scale', 'b_zero_point', 'y_scale', 'y_zero_point'],\n outputs=['y'],)\n\n#2D\na = np.array([[208, 236, 0, 238],\n [3, 214, 255, 29], ], dtype=np.uint8)\n\na_scale = np.array([0.0066], dtype=np.float32)\na_zero_point = np.array([113], dtype=np.uint8)\n\nb = np.array([[152, 51, 244],\n [60, 26, 255],\n [0, 127, 246],\n [127, 254, 247]], dtype=np.uint8)\n\nb_scale = np.array([0.00705], dtype=np.float32)\nb_zero_point = np.array([114], dtype=np.uint8)\n\ny_scale = np.array([0.0107], dtype=np.float32)\ny_zero_point = np.array([118], dtype=np.uint8)\n\noutput = np.array([[168, 115, 255],\n [1, 66, 151], ], dtype=np.uint8)\n\nexpect(node, inputs=[a, a_scale, a_zero_point, b, b_scale, b_zero_point, y_scale, y_zero_point], outputs=[output],\n name='test_qlinearmatmul_2D')\n\n#3D\na = np.array([[[208, 236, 0, 238],\n [3, 214, 255, 29]],\n [[208, 236, 0, 238],\n [3, 214, 255, 29]]], dtype=np.uint8)\n\na_scale = np.array([0.0066], dtype=np.float32)\na_zero_point = np.array([113], dtype=np.uint8)\n\nb = np.array([[[152, 
51, 244],\n [60, 26, 255],\n [0, 127, 246],\n [127, 254, 247]],\n [[152, 51, 244],\n [60, 26, 255],\n [0, 127, 246],\n [127, 254, 247]]], dtype=np.uint8)\n\nb_scale = np.array([0.00705], dtype=np.float32)\nb_zero_point = np.array([114], dtype=np.uint8)\n\ny_scale = np.array([0.0107], dtype=np.float32)\ny_zero_point = np.array([118], dtype=np.uint8)\n\noutput = np.array([[[168, 115, 255],\n [1, 66, 151]],\n [[168, 115, 255],\n [1, 66, 151]]], dtype=np.uint8)\n\nexpect(node, inputs=[a, a_scale, a_zero_point, b, b_scale, b_zero_point, y_scale, y_zero_point], outputs=[output],\n name='test_qlinearmatmul_3D')", + "summary": "qlinearmatmul" + } + ], + "inputs": [ + { + "description": "N-dimensional quantized matrix a", + "name": "a", + "type": "T1" + }, + { + "description": "scale of quantized input a", + "name": "a_scale", + "type": "tensor(float)" + }, + { + "description": "zero point of quantized input a", + "name": "a_zero_point", + "type": "T1" + }, + { + "description": "N-dimensional quantized matrix b", + "name": "b", + "type": "T2" + }, + { + "description": "scale of quantized input b", + "name": "b_scale", + "type": "tensor(float)" + }, + { + "description": "zero point of quantized input b", + "name": "b_zero_point", + "type": "T2" + }, + { + "description": "scale of quantized output y", + "name": "y_scale", + "type": "tensor(float)" + }, + { + "description": "zero point of quantized output y", + "name": "y_zero_point", + "type": "T3" + } + ], + "max_input": 8, + "max_output": 1, + "min_input": 8, + "min_output": 1, + "outputs": [ + { + "description": "Quantized matrix multiply results from a * b", + "name": "y", + "type": "T3" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain input a and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int8)", + 
"tensor(uint8)" + ], + "description": "Constrain input b and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T2" + }, + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain output y and its zero point data type to 8-bit integer tensor.", + "type_param_str": "T3" + } + ] + } + }, + { + "name": "QuantizeLinear", + "schema": { + "description": "The linear per-tensor/layer quantization operator. It consumes a high precision tensor, a scale, a zero point to compute the low precision / quantized tensor.\nThe quantization formula is y = saturate ((x / y_scale) + y_zero_point). For saturation, it saturates to [0, 255] if it's uint8, or [-128, 127] if it's int8.\nFor (x / y_scale), it's rounding to nearest ties to even. Refer to https://en.wikipedia.org/wiki/Rounding for details. 'y_zero_point' and 'y' must have same type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node('QuantizeLinear',\n inputs=['x', 'y_scale', 'y_zero_point'],\n outputs=['y'],)\n\nx = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)\ny_scale = np.float32(2)\ny_zero_point = np.uint8(128)\ny = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)\n\nexpect(node, inputs=[x, y_scale, y_zero_point], outputs=[y],\n name='test_quantizelinear')", + "summary": "quantizelinear" + } + ], + "inputs": [ + { + "description": "N-D full precision Input tensor to be quantized.", + "name": "x", + "type": "T1" + }, + { + "description": "Scale for doing quantization to get 'y'. It's a scalar, which means a per-tensor/layer quantization.", + "name": "y_scale", + "type": "tensor(float)" + }, + { + "description": "Zero point for doing quantization to get 'y'. It's a scalar, which means a per-tensor/layer quantization. 
Default value is uint8 typed 0 if it's not specified.", + "name": "y_zero_point", + "option": "optional", + "type": "T2" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "N-D quantized output tensor. It has same shape as input 'x'.", + "name": "y", + "type": "T2" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(int32)" + ], + "description": "Constrain 'x' to float or int32 tensor.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int8)", + "tensor(uint8)" + ], + "description": "Constrain 'y_zero_point' and 'y' to 8-bit integer tensor.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "RNN", + "schema": { + "attributes": [ + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.For example with LeakyRelu, the default alpha is 0.01.", + "name": "activation_alpha", + "required": false, + "type": "float32[]" + }, + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.", + "name": "activation_beta", + "required": false, + "type": "float32[]" + }, + { + "description": "One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default `Tanh` if not specified.", + "name": "activations", + "required": false, + "type": "string[]" + }, + { + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. 
No clip if not specified.", + "name": "clip", + "required": false, + "type": "float32" + }, + { + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.", + "name": "direction", + "required": false, + "type": "string" + }, + { + "description": "Number of neurons in the hidden layer", + "name": "hidden_size", + "required": false, + "type": "int64" + }, + { + "description": "The sequence output for the hidden is optional if 0. Default 0.", + "name": "output_sequence", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "Computes an one-layer simple RNN. This operator is usually supported\nvia some custom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`t` - time step (t-1 means previous time step)\n\n`Wi` - W parameter weight matrix for input gate\n\n`Ri` - R recurrence weight matrix for input gate\n\n`Wbi` - W parameter bias vector for input gate\n\n`Rbi` - R parameter bias vector for input gate\n\n`WBi` - W parameter weight matrix for backward input gate\n\n`RBi` - R recurrence weight matrix for backward input gate\n\n`WBbi` - WR bias vectors for backward input gate\n\n`RBbi` - RR bias vectors for backward input gate\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n - Ht = f(Xt*(Wi^T) + Ht-1*Ri + Wbi + Rbi)\n", + "domain": "ai.onnx", + "examples": [ + 
{ + "code": "input = np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n 'RNN',\n inputs=['X', 'W', 'R'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNN_Helper(X=input, W=W, R=R)\n_, Y_h = rnn.step()\nexpect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_simple_rnn_defaults')", + "summary": "defaults" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\ncustom_bias = 0.1\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n 'RNN',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, hidden_size)).astype(np.float32)\nR_B = np.zeros((1, hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNN_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)],\n name='test_simple_rnn_with_initial_bias')", + "summary": "initial_bias" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],\n [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\n\nnode = onnx.helper.make_node(\n 'RNN',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = np.random.randn(1, hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, hidden_size).astype(np.float32)\nR_B = 
np.random.randn(1, hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNN_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_rnn_seq_length')", + "summary": "seq_length" + } + ], + "inputs": [ + { + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, input_size]`.", + "name": "W", + "type": "T" + }, + { + "description": "The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, hidden_size]`.", + "name": "R", + "type": "T" + }, + { + "description": "The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` and `[WBbi, RBbi]` (if bidirectional). The tensor has shape `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed to be 0.", + "name": "B", + "option": "optional", + "type": "T" + }, + { + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.", + "name": "sequence_lens", + "option": "optional", + "type": "T1" + }, + { + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_h", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "3 - 6", + "max_input": 6, + "max_output": 2, + "min_input": 3, + "min_output": 0, + "outputs": [ + { + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. 
It is optional if `output_sequence` is 0.", + "name": "Y", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_h", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "0 - 2", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "RNN", + "schema": { + "attributes": [ + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.For example with LeakyRelu, the default alpha is 0.01.", + "name": "activation_alpha", + "required": false, + "type": "float32[]" + }, + { + "description": "Optional scaling values used by some activation functions. The values are consumed in the order of activation functions, for example (f, g, h) in LSTM. Default values are the same as of corresponding ONNX operators.", + "name": "activation_beta", + "required": false, + "type": "float32[]" + }, + { + "description": "One (or two if bidirectional) activation function for input gate. The activation function must be one of the activation functions specified above. Optional: Default `Tanh` if not specified.", + "name": "activations", + "required": false, + "type": "string[]" + }, + { + "description": "Cell clip threshold. Clipping bounds the elements of a tensor in the range of [-threshold, +threshold] and is applied to the input of activations. 
No clip if not specified.", + "name": "clip", + "required": false, + "type": "float32" + }, + { + "default": "forward", + "description": "Specify if the RNN is forward, reverse, or bidirectional. Must be one of forward (default), reverse, or bidirectional.", + "name": "direction", + "required": false, + "type": "string" + }, + { + "description": "Number of neurons in the hidden layer", + "name": "hidden_size", + "required": false, + "type": "int64" + } + ], + "category": "Layer", + "description": "Computes an one-layer simple RNN. This operator is usually supported\nvia some custom implementation such as CuDNN.\n\nNotations:\n\n`X` - input tensor\n\n`i` - input gate\n\n`t` - time step (t-1 means previous time step)\n\n`Wi` - W parameter weight matrix for input gate\n\n`Ri` - R recurrence weight matrix for input gate\n\n`Wbi` - W parameter bias vector for input gate\n\n`Rbi` - R parameter bias vector for input gate\n\n`WBi` - W parameter weight matrix for backward input gate\n\n`RBi` - R recurrence weight matrix for backward input gate\n\n`WBbi` - WR bias vectors for backward input gate\n\n`RBbi` - RR bias vectors for backward input gate\n\n`H` - Hidden state\n\n`num_directions` - 2 if direction == bidirectional else 1\n\nActivation functions:\n\n Relu(x) - max(0, x)\n\n Tanh(x) - (1 - e^{-2x})/(1 + e^{-2x})\n\n Sigmoid(x) - 1/(1 + e^{-x})\n\n (NOTE: Below are optional)\n\n Affine(x) - alpha*x + beta\n\n LeakyRelu(x) - x if x >= 0 else alpha * x\n\n ThresholdedRelu(x) - x if x >= alpha else 0\n\n ScaledTanh(x) - alpha*Tanh(beta*x)\n\n HardSigmoid(x) - min(max(alpha*x + beta, 0), 1)\n\n Elu(x) - x if x >= 0 else alpha*(e^x - 1)\n\n Softsign(x) - x/(1 + |x|)\n\n Softplus(x) - log(1 + e^x)\n\nEquations (Default: f=Tanh):\n\n - Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)\nThis operator has **optional** inputs/outputs. See [the doc](https://github.com/onnx/onnx/blob/master/docs/IR.md) for more details about the representation of optional arguments. 
An empty string may be used in the place of an actual argument's name to indicate a missing argument. Trailing optional arguments (those not followed by an argument that is present) may also be simply omitted.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32)\n\ninput_size = 2\nhidden_size = 4\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n 'RNN',\n inputs=['X', 'W', 'R'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\nrnn = RNN_Helper(X=input, W=W, R=R)\n_, Y_h = rnn.step()\nexpect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_simple_rnn_defaults')", + "summary": "defaults" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\ncustom_bias = 0.1\nweight_scale = 0.1\n\nnode = onnx.helper.make_node(\n 'RNN',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n hidden_size=hidden_size\n)\n\nW = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32)\nR = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32)\n\n# Adding custom bias\nW_B = custom_bias * np.ones((1, hidden_size)).astype(np.float32)\nR_B = np.zeros((1, hidden_size)).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNN_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)],\n name='test_simple_rnn_with_initial_bias')", + "summary": "initial_bias" + }, + { + "code": "input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],\n [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]]]).astype(np.float32)\n\ninput_size = 3\nhidden_size = 5\n\nnode = onnx.helper.make_node(\n 'RNN',\n inputs=['X', 'W', 'R', 'B'],\n outputs=['', 'Y'],\n 
hidden_size=hidden_size\n)\n\nW = np.random.randn(1, hidden_size, input_size).astype(np.float32)\nR = np.random.randn(1, hidden_size, hidden_size).astype(np.float32)\n\n# Adding custom bias\nW_B = np.random.randn(1, hidden_size).astype(np.float32)\nR_B = np.random.randn(1, hidden_size).astype(np.float32)\nB = np.concatenate((W_B, R_B), axis=1)\n\nrnn = RNN_Helper(X=input, W=W, R=R, B=B)\n_, Y_h = rnn.step()\nexpect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_rnn_seq_length')", + "summary": "seq_length" + } + ], + "inputs": [ + { + "description": "The input sequences packed (and potentially padded) into one 3-D tensor with the shape of `[seq_length, batch_size, input_size]`.", + "name": "X", + "type": "T" + }, + { + "description": "The weight tensor for input gate. Concatenation of `Wi` and `WBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, input_size]`.", + "name": "W", + "type": "T" + }, + { + "description": "The recurrence weight tensor. Concatenation of `Ri` and `RBi` (if bidirectional). The tensor has shape `[num_directions, hidden_size, hidden_size]`.", + "name": "R", + "type": "T" + }, + { + "description": "The bias tensor for input gate. Concatenation of `[Wbi, Rbi]` and `[WBbi, RBbi]` (if bidirectional). The tensor has shape `[num_directions, 2*hidden_size]`. Optional: If not specified - assumed to be 0.", + "name": "B", + "option": "optional", + "type": "T" + }, + { + "description": "Optional tensor specifying lengths of the sequences in a batch. If not specified - assumed all sequences in the batch to have length `seq_length`. It has shape `[batch_size]`.", + "name": "sequence_lens", + "option": "optional", + "type": "T1" + }, + { + "description": "Optional initial value of the hidden. If not specified - assumed to be 0. 
It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "initial_h", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "3 - 6", + "max_input": 6, + "max_output": 2, + "min_input": 3, + "min_output": 0, + "outputs": [ + { + "description": "A tensor that concats all the intermediate output values of the hidden. It has shape `[seq_length, num_directions, batch_size, hidden_size]`. ", + "name": "Y", + "option": "optional", + "type": "T" + }, + { + "description": "The last output value of the hidden. It has shape `[num_directions, batch_size, hidden_size]`.", + "name": "Y_h", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "0 - 2", + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)" + ], + "description": "Constrain seq_lens to integer tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "RandomNormal", + "schema": { + "attributes": [ + { + "default": 1, + "description": "The data type for the elements of the output tensor. 
Default is TensorProto::FLOAT.", + "name": "dtype", + "required": false, + "type": "int64" + }, + { + "description": "The mean of the normal distribution.", + "name": "mean", + "required": false, + "type": "float32" + }, + { + "default": 1.0, + "description": "The standard deviation of the normal distribution.", + "name": "scale", + "required": false, + "type": "float32" + }, + { + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one.", + "name": "seed", + "required": false, + "type": "float32" + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "required": true, + "type": "int64[]" + } + ], + "description": "Generate a tensor with random values drawn from a normal distribution. The shape\nof the tensor is specified by the `shape` argument and the parameter of the normal distribution\nspecified by `mean` and `scale`.\n\nThe data type is specified by the 'dtype' argument. The 'dtype' argument must\nbe one of the data types specified in the 'DataType' enum field in the\nTensorProto message.\n", + "domain": "ai.onnx", + "max_input": 0, + "max_output": 1, + "min_input": 0, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of random values drawn from normal distribution", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "RandomNormalLike", + "schema": { + "attributes": [ + { + "description": "(Optional) The data type for the elements of the output tensor, if not specified, we will use the data type of the input tensor.", + "name": "dtype", + "required": false, + "type": "int64" + }, + { + "description": "The mean of the normal distribution.", + "name": "mean", + "required": false, + "type": "float32" + 
}, + { + "default": 1.0, + "description": "The standard deviation of the normal distribution.", + "name": "scale", + "required": false, + "type": "float32" + }, + { + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one.", + "name": "seed", + "required": false, + "type": "float32" + } + ], + "description": "Generate a tensor with random values drawn from a normal distribution.\nThe shape of the output tensor is copied from the shape of the input tensor,\nand the parameters of the normal distribution are specified by `mean` and `scale`.\n\nThe data type is specified by the 'dtype' argument, or copied from the input tensor if not provided.\nThe 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the\nTensorProto message, and be valid as an output type.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input tensor to copy shape and optionally type information from.", + "name": "input", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of random values drawn from normal distribution", + "name": "output", + "type": "T2" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain to any tensor type. 
If the dtype attribute is not provided this must be a valid output type.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain output types to float tensors.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "RandomUniform", + "schema": { + "attributes": [ + { + "default": 1, + "description": "The data type for the elements of the output tensor. If not specified, default is TensorProto::FLOAT.", + "name": "dtype", + "required": false, + "type": "int64" + }, + { + "default": 1.0, + "description": "Upper boundary of the output values.", + "name": "high", + "required": false, + "type": "float32" + }, + { + "description": "Lower boundary of the output values.", + "name": "low", + "required": false, + "type": "float32" + }, + { + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one.", + "name": "seed", + "required": false, + "type": "float32" + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "required": true, + "type": "int64[]" + } + ], + "description": "Generate a tensor with random values drawn from a uniform distribution. The shape\nof the tensor is specified by the `shape` argument and the range by `low` and `high`.\n\nThe data type is specified by the 'dtype' argument. 
The 'dtype' argument must\nbe one of the data types specified in the 'DataType' enum field in the\nTensorProto message.\n", + "domain": "ai.onnx", + "max_input": 0, + "max_output": 1, + "min_input": 0, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of random values drawn from uniform distribution", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "RandomUniformLike", + "schema": { + "attributes": [ + { + "description": "(Optional) The data type for the elements of the output tensor, if not specified, we will use the data type of the input tensor.", + "name": "dtype", + "required": false, + "type": "int64" + }, + { + "default": 1.0, + "description": "Upper boundary of the output values.", + "name": "high", + "required": false, + "type": "float32" + }, + { + "description": "Lower boundary of the output values.", + "name": "low", + "required": false, + "type": "float32" + }, + { + "description": "(Optional) Seed to the random generator, if not specified we will auto generate one.", + "name": "seed", + "required": false, + "type": "float32" + } + ], + "description": "Generate a tensor with random values drawn from a uniform distribution.\nThe shape of the output tensor is copied from the shape of the input tensor,\nand the parameters of the uniform distribution are specified by `low` and `high`.\n\nThe data type is specified by the 'dtype' argument, or copied from the input tensor if not provided.\nThe 'dtype' argument must be one of the data types specified in the 'DataType' enum field in the\nTensorProto message and be valid as an output type.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input tensor to copy shape and optionally type information from.", 
+ "name": "input", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of random values drawn from uniform distribution", + "name": "output", + "type": "T2" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain to any tensor type. If the dtype attribute is not provided this must be a valid output type.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain output types to float tensors.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Range", + "schema": { + "description": "Generate a tensor containing a sequence of numbers that begin at `start` and extends by increments of `delta`\nup to `limit` (exclusive).\n\nThe number of elements in the output of range is computed as below-\n\n`number_of_elements = max( ceil( (limit - start) / delta ) , 0 )`\n\nThe pseudocode determining the contents of the output is shown below-\n\n`for(int i=0; i) and produces one output data\n(Tensor) where the reciprocal is, y = 1/x, is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Reciprocal',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.reciprocal(x) # expected output [-0.25, 0.5],\nexpect(node, inputs=[x], outputs=[y],\n name='test_reciprocal_example')\n\nx = np.random.rand(3, 4, 5).astype(np.float32) + 0.5\ny = np.reciprocal(x)\nexpect(node, inputs=[x], outputs=[y],\n 
name='test_reciprocal')", + "summary": "reciprocal" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Reciprocal", + "schema": { + "description": "Reciprocal takes one input data (Tensor) and produces one output data\n(Tensor) where the reciprocal is, y = 1/x, is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Reciprocal',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-4, 2]).astype(np.float32)\ny = np.reciprocal(x) # expected output [-0.25, 0.5],\nexpect(node, inputs=[x], outputs=[y],\n name='test_reciprocal_example')\n\nx = np.random.rand(3, 4, 5).astype(np.float32) + 0.5\ny = np.reciprocal(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_reciprocal')", + "summary": "reciprocal" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceL1", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. 
The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the L1 norm of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[78.]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [2]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 
1)\n#print(reduced)\n#[[3., 7.], [11., 15.], [19., 23.]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_keep_dims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_negative_axes_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, 
shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_negative_axes_keep_dims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceL1", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the L1 norm of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[78.]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [2]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[3., 7.], [11., 15.], [19., 23.]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = 
[2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_keep_dims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL1',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[3.], [7.]], [[11.], [15.]], [[19.], [23.]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_negative_axes_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(a=np.abs(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l1_negative_axes_keep_dims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + 
"type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceL2", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the L2 norm of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=axes, keepdims=keepdims == 1))\n#print(reduced)\n#[[[25.49509757]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=axes, keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [2]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n#print(reduced)\n#[[2.23606798, 5.],\n# [7.81024968, 10.63014581],\n# [13.45362405, 16.2788206]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n 
name='test_reduce_l2_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n#print(reduced)\n#[[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_l2_keep_dims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n#[[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_negative_axes_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_negative_axes_keep_dims_random')", + "summary": 
"negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceL2", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the L2 norm of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=axes, keepdims=keepdims == 1))\n#print(reduced)\n#[[[25.49509757]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=axes, keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [2]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n#print(reduced)\n#[[2.23606798, 5.],\n# [7.81024968, 10.63014581],\n# [13.45362405, 16.2788206]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n 
name='test_reduce_l2_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n#print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n#print(reduced)\n#[[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_l2_keep_dims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceL2',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)\n# print(data)\n#[[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]\n\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n#[[[2.23606798], [5.]]\n# [[7.81024968], [10.63014581]]\n# [[13.45362405], [16.2788206 ]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_negative_axes_keep_dims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sqrt(np.sum(\n a=np.square(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_l2_negative_axes_keep_dims_random')", + "summary": 
"negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceLogSum", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the log sum of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, keepdims=True))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_default')", + "summary": "keepdims" + }, + { + "code": "node = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"],\n axes=[-2]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=(-2), keepdims=True))\n# print(reduced)\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_negative_axes')", + "summary": "negative_axes_keepdims" + }, + { + "code": "node = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"],\n axes=[2, 1],\n keepdims=0\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=(2, 1), keepdims=False))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_desc_axes')\n\nnode = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"],\n axes=[0, 1],\n keepdims=0\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=(0, 1), keepdims=False))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_asc_axes')", + "summary": "nokeepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + 
"type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceLogSum", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the log sum of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, keepdims=True))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_default')", + "summary": "keepdims" + }, + { + "code": "node = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"],\n axes=[-2]\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=(-2), keepdims=True))\n# print(reduced)\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_negative_axes')", + "summary": "negative_axes_keepdims" + }, + { + "code": "node = onnx.helper.make_node(\n 
'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"],\n axes=[2, 1],\n keepdims=0\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=(2, 1), keepdims=False))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_desc_axes')\n\nnode = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"],\n axes=[0, 1],\n keepdims=0\n)\ndata = np.random.ranf([3, 4, 5]).astype(np.float32)\nreduced = np.log(np.sum(data, axis=(0, 1), keepdims=False))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_asc_axes')", + "summary": "nokeepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceLogSumExp", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the log sum exponent of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=axes,\n keepdims=keepdims == 1))\n# print(reduced)\n# [[[60.00671387]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=axes,\n keepdims=keepdims == 1))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(\n np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n#[[20., 2.31326175]\n# [40.00004578, 2.31326175]\n# [60.00671387, 2.31326175]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(\n np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 
2]\naxes = [1]\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced 
output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceLogSumExp", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the log sum exponent of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=axes,\n keepdims=keepdims == 1))\n# print(reduced)\n# [[[60.00671387]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=axes,\n keepdims=keepdims == 1))\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(\n np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n# print(reduced)\n#[[20., 2.31326175]\n# [40.00004578, 2.31326175]\n# [60.00671387, 2.31326175]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(\n np.exp(data), axis=tuple(axes), keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 
2]\naxes = [1]\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ReduceLogSumExp',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n# print(reduced)\n# [[[20., 2.31326175]]\n# [[40.00004578, 2.31326175]]\n# [[60.00671387, 2.31326175]]]\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.log(np.sum(np.exp(data),\n axis=tuple(axes),\n keepdims=keepdims == 1))\n\nexpect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_exp_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced 
output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMax", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the max of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n[[[60.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_default_axes_keepdim_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n 
keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + 
"description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMax", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the max of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n[[[60.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_default_axes_keepdim_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 
1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, 
shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMax", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the max of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n[[[60.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_default_axes_keepdim_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n 
keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMax',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_max_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + 
"tensor(uint8)", + "tensor(int8)" + ], + "description": "Constrain input and output types to high-precision and 8 bit numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMean", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the mean of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[18.25]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], 
[20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[12.5, 1.5]\n# [35., 1.5]\n# [57.5, 1.5]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, 
axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMean", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the mean of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[18.25]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[12.5, 1.5]\n# [35., 1.5]\n# [57.5, 1.5]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 
1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMean',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[12.5, 1.5]]\n# [[35., 1.5]]\n# [[57.5, 1.5]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.mean(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_mean_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric 
tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMin", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the min of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, 
axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], 
outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMin", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the min of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n 
keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + 
"description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceMin", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the min of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceMin',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = 
np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceMin', inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, 
shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_min_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(uint8)", + "tensor(int8)" + ], + "description": "Constrain input and output types to high-precision and 8 bit numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceProd", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the product of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[4.790016e+08]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[3., 8.]\n# [35., 48.]\n# [99., 120.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], 
[3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + 
"type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceProd", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the product of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[4.790016e+08]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=axes, keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = 
np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[3., 8.]\n# [35., 48.]\n# [99., 120.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceProd',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[3., 8.]]\n# [[35., 48.]]\n# [[99., 120.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_prod_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.prod(data, axis=tuple(axes), keepdims=keepdims == 1)\nexpect(node, inputs=[data], outputs=[reduced], 
name='test_reduce_prod_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceSum", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the sum of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[78.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[4., 6.]\n# [12., 14.]\n# [20., 22.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 
6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + 
] + } + }, + { + "name": "ReduceSum", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the sum of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[78.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 
1)\n#print(reduced)\n#[[4., 6.]\n# [12., 14.]\n# [20., 22.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSum',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[4., 6.]]\n# [[12., 14.]]\n# [[20., 22.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_negative_axes_keepdims_random')", + "summary": 
"negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceSumSquare", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor.", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the sum square of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. 
If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[650.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[10., 20.]\n# [74., 100.]\n# [202., 244.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n 
outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[10., 20.]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[10., 20.s]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + 
"tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "ReduceSumSquare", + "schema": { + "attributes": [ + { + "description": "A list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "default": 1, + "description": "Keep the reduced dimension or not, default 1 mean keep reduced dimension.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Computes the sum square of the input tensor's element along the provided axes. The resulted\ntensor has the same rank as the input if keepdims equal 1. If keepdims equal 0, then\nthe resulted tensor have the reduced dimension pruned.\n\nThe above behavior is similar to numpy, with the exception that numpy default keepdims to\nFalse instead of True.", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n outputs=['reduced'],\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=axes, keepdims=keepdims == 1)\n#print(reduced)\n#[[[650.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_default_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=axes, keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_default_axes_keepdims_random')", + "summary": "default_axes_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 0\n\nnode = 
onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[10., 20.]\n# [74., 100.]\n# [202., 244.]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_do_not_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_do_not_keepdims_random')", + "summary": "do_not_keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [1]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n#print(reduced)\n#[[[10., 20.]]\n# [[74., 100.]]\n# [[202., 244.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_keepdims_random')", + "summary": "keepdims" + }, + { + "code": "shape = [3, 2, 2]\naxes = [-2]\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n 'ReduceSumSquare',\n inputs=['data'],\n outputs=['reduced'],\n axes=axes,\n keepdims=keepdims)\n\ndata = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n#[[[10., 20.s]]\n# [[74., 100.]]\n# 
[[202., 244.]]]\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_negative_axes_keepdims_example')\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.sum(np.square(data), axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(node, inputs=[data], outputs=[reduced], name='test_reduce_sum_square_negative_axes_keepdims_random')", + "summary": "negative_axes_keepdims" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reduced output tensor.", + "name": "reduced", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Relu", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "category": "Activation", + "description": "Relu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Relu',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_relu')", + "summary": "relu" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": 
"Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Relu", + "schema": { + "category": "Activation", + "description": "Relu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Relu',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_relu')", + "summary": "relu" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Reshape", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + }, + { + "description": "New shape", + "name": "shape", + "required": false, + "type": "int64[]" + } + ], + "category": "Shape", + "description": "Reshape the input tensor similar to numpy.reshape.\nIt takes a tensor as input and an argument `shape`. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. 
In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor).", + "domain": "ai.onnx", + "examples": [ + { + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n 'reordered_all_dims': np.array([4, 2, 3], dtype=np.int64),\n 'reordered_last_dims': np.array([2, 4, 3], dtype=np.int64),\n 'reduced_dims': np.array([2, 12], dtype=np.int64),\n 'extended_dims': np.array([2, 3, 2, 2], dtype=np.int64),\n 'one_dim': np.array([24], dtype=np.int64),\n 'negative_dim': np.array([2, -1, 2], dtype=np.int64),\n 'negative_extended_dims': np.array([-1, 2, 3, 4], dtype=np.int64),\n 'zero_dim': np.array([2, 0, 4, 1], dtype=np.int64),\n 'zero_and_negative_dim': np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n 'Reshape',\n inputs=['data', 'shape'],\n outputs=['reshaped'],\n )\n\n reshaped = reshape_reference_implementation(data, shape)\n\n expect(node, inputs=[data, shape], outputs=[reshaped],\n name='test_reshape_' + test_name)", + "summary": "reshape" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reshaped data.", + "name": "reshaped", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Reshape", + "schema": { + "category": "Shape", + "description": "Reshape the input tensor similar to numpy.reshape.\nFirst input is the data tensor, second input is a shape tensor which 
specifies the output shape. It outputs the reshaped tensor.\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is unchanged (i.e. taken\nfrom the input tensor).", + "domain": "ai.onnx", + "examples": [ + { + "code": "original_shape = [2, 3, 4]\ntest_cases = {\n 'reordered_all_dims': np.array([4, 2, 3], dtype=np.int64),\n 'reordered_last_dims': np.array([2, 4, 3], dtype=np.int64),\n 'reduced_dims': np.array([2, 12], dtype=np.int64),\n 'extended_dims': np.array([2, 3, 2, 2], dtype=np.int64),\n 'one_dim': np.array([24], dtype=np.int64),\n 'negative_dim': np.array([2, -1, 2], dtype=np.int64),\n 'negative_extended_dims': np.array([-1, 2, 3, 4], dtype=np.int64),\n 'zero_dim': np.array([2, 0, 4, 1], dtype=np.int64),\n 'zero_and_negative_dim': np.array([2, 0, 1, -1], dtype=np.int64),\n}\ndata = np.random.random_sample(original_shape).astype(np.float32)\n\nfor test_name, shape in test_cases.items():\n node = onnx.helper.make_node(\n 'Reshape',\n inputs=['data', 'shape'],\n outputs=['reshaped'],\n )\n\n reshaped = reshape_reference_implementation(data, shape)\n\n expect(node, inputs=[data, shape], outputs=[reshaped],\n name='test_reshape_' + test_name)", + "summary": "reshape" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + }, + { + "description": "Specified shape for output.", + "name": "shape", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Reshaped data.", + "name": "reshaped", + "type": "T" + } + ], + "since_version": 5, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + 
"tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Resize", + "schema": { + "attributes": [ + { + "default": "nearest", + "description": "Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)", + "name": "mode", + "required": false, + "type": "string" + } + ], + "description": "Resize the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.47119141 2.78125 4.08251953]\n# [ 6.71142578 8.02148438 9.32275391]\n# [11.91650391 13.2265625 14.52783203]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_cubic')", + "summary": "resize_downsample_scales_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n cubic_coeff_a=-0.5,\n exclude_outside=True\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.36812675 2.6695014 4.0133367 ]\n# [ 6.57362535 7.875 9.2188353 ]\n# [11.94896657 13.25034122 14.59417652]]]]\noutput = interpolate_nd(data, lambda x: cubic_coeffs(x, 
A=-0.5), scale_factors=scales,\n exclude_outside=True).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_cubic_A_n0p5_exclude_outside')", + "summary": "resize_downsample_scales_cubic_A_n0p5_exclude_outside" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1. 2.39519159 3.79038317]\n# [ 6.58076634 7.97595793 9.37114951]\n# [12.16153268 13.55672427 14.95191585]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_cubic_align_corners')", + "summary": "resize_downsample_scales_cubic_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[2.6666665 4.3333331]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_linear')", + "summary": "resize_downsample_scales_linear" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], 
dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.142857]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_linear_align_corners')", + "summary": "resize_downsample_scales_linear_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_nearest')", + "summary": "resize_downsample_scales_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.63078704 3.00462963 4.37847222]\n# [ 7.12615741 8.5 9.87384259]\n# [12.62152778 13.99537037 15.36921296]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_cubic')", + "summary": "resize_downsample_sizes_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='pytorch_half_pixel'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n 
[5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 1], dtype=np.int64)\n\n# [[[[ 1.6666666]\n# [ 7. ]\n# [12.333333 ]]]]\noutput = interpolate_nd(\n data, linear_coeffs, output_size=sizes, coordinate_transformation_mode='pytorch_half_pixel').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_linear_pytorch_half_pixel')", + "summary": "resize_downsample_sizes_linear_pytorch_half_pixel" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 1, 3], dtype=np.int64)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_nearest')", + "summary": "resize_downsample_sizes_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='tf_half_pixel_for_nn'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 2], dtype=np.int64)\n\n# [[[[ 6. 8.]\n# [10. 12.]\n# [14. 
16.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, output_size=sizes, coordinate_transformation_mode='tf_half_pixel_for_nn').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn')", + "summary": "resize_downsample_sizes_nearest_tf_half_pixel_for_nn" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='tf_crop_and_resize'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(data, linear_coeffs, output_size=sizes, roi=roi,\n coordinate_transformation_mode='tf_crop_and_resize').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_tf_crop_and_resize')", + "summary": "resize_tf_crop_and_resize" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='tf_crop_and_resize',\n extrapolation_value=10.0\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 10. 10. ]\n# [12.400001 10. 10. ]\n# [10. 10. 10. 
]]]]\noutput = interpolate_nd(data, linear_coeffs, output_size=sizes, roi=roi,\n coordinate_transformation_mode='tf_crop_and_resize', extrapolation_value=10.0).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_tf_crop_and_resize')", + "summary": "resize_tf_crop_and_resize_extrapolation_value" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125\n# 2.91015625 3.38671875 3.68359375]\n# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875\n# 4.09765625 4.57421875 4.87109375]\n# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375\n# 6.00390625 6.48046875 6.77734375]\n# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625\n# 8.51953125 8.99609375 9.29296875]\n# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625\n# 10.14453125 10.62109375 10.91796875]\n# [10.22265625 10.51953125 10.99609375 11.625 12.03125\n# 12.66015625 13.13671875 13.43359375]\n# [12.12890625 12.42578125 12.90234375 13.53125 13.9375\n# 14.56640625 15.04296875 15.33984375]\n# [13.31640625 13.61328125 14.08984375 14.71875 15.125\n# 15.75390625 16.23046875 16.52734375]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic')", + "summary": "resize_upsample_scales_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n cubic_coeff_a=-0.5,\n exclude_outside=True\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = 
np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882\n# 2.93713516 3.47917561 3.73529412]\n# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285\n# 3.96160918 4.50364964 4.75976814]\n# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466\n# 6.12977099 6.67181144 6.92792995]\n# [ 5.91176471 6.16788321 6.70992366 7.25 7.75\n# 8.29007634 8.83211679 9.08823529]\n# [ 7.91176471 8.16788321 8.70992366 9.25 9.75\n# 10.29007634 10.83211679 11.08823529]\n# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534\n# 12.45038168 12.99242213 13.24854064]\n# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715\n# 14.61854349 15.16058394 15.41670245]\n# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118\n# 15.64301751 16.18505796 16.44117647]]]]\noutput = interpolate_nd(data, lambda x: cubic_coeffs(x, A=-0.5), scale_factors=scales,\n exclude_outside=True).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic_A_n0p5_exclude_outside')", + "summary": "resize_upsample_scales_cubic_A_n0p5_exclude_outside" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394\n# 3.19970845 3.65889213 4. 
]\n# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542\n# 4.56413994 5.02332362 5.36443149]\n# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012\n# 6.40087464 6.86005831 7.20116618]\n# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819\n# 8.51749271 8.97667638 9.31778426]\n# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968\n# 9.8819242 10.34110787 10.68221574]\n# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776\n# 11.99854227 12.45772595 12.79883382]\n# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245\n# 13.83527697 14.29446064 14.63556851]\n# [13. 13.34110787 13.80029155 14.32944606 14.67055394\n# 15.19970845 15.65889213 16. ]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic_align_corners')", + "summary": "resize_upsample_scales_cubic_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n coordinate_transformation_mode='asymmetric'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\nroi = np.array([], dtype=np.float32)\n\n# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.\n# 4.09375]\n# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625\n# 5.71875]\n# [ 5. 5.40625 6. 6.5 7. 7.59375 8.\n# 8.09375]\n# [ 7. 7.40625 8. 8.5 9. 9.59375 10.\n# 10.09375]\n# [ 9. 9.40625 10. 10.5 11. 11.59375 12.\n# 12.09375]\n# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375\n# 14.46875]\n# [13. 13.40625 14. 14.5 15. 
15.59375 16.\n# 16.09375]\n# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375\n# 16.46875]]]]\noutput = interpolate_nd(data, lambda x: cubic_coeffs(x, A=-0.75), scale_factors=scales,\n coordinate_transformation_mode='asymmetric').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic_asymmetric')", + "summary": "resize_upsample_scales_cubic_asymmetric" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.25 1.75 2. ]\n# [1.5 1.75 2.25 2.5 ]\n# [2.5 2.75 3.25 3.5 ]\n# [3. 3.25 3.75 4. ]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_linear')", + "summary": "resize_upsample_scales_linear" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.33333333 1.66666667 2. ]\n# [1.66666667 2. 2.33333333 2.66666667]\n# [2.33333333 2.66666667 3. 3.33333333]\n# [3. 3.33333333 3.66666667 4. 
]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_linear_align_corners')", + "summary": "resize_upsample_scales_linear_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_nearest')", + "summary": "resize_upsample_scales_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 9, 10], dtype=np.int64)\n\n# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922\n# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]\n# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963\n# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]\n# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693\n# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]\n# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069\n# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]\n# [ 6.88975 7.07525 7.40625 7.85725 8.342\n# 8.658 9.14275 9.59375 9.92475 10.11025 ]\n# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931\n# 
10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]\n# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307\n# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]\n# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037\n# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]\n# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078\n# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_cubic')", + "summary": "resize_upsample_sizes_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest')", + "summary": "resize_upsample_sizes_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='half_pixel',\n nearest_mode='ceil'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 
7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data, lambda x: nearest_coeffs(x, mode='ceil'), output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest_ceil_half_pixel')", + "summary": "resize_upsample_sizes_nearest_ceil_half_pixel" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='align_corners',\n nearest_mode='floor'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [13. 13. 13. 14. 14. 15. 15. 
16.]]]]\noutput = interpolate_nd(\n data, lambda x: nearest_coeffs(x, mode='floor'), output_size=sizes, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest_floor_align_corners')", + "summary": "resize_upsample_sizes_nearest_floor_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='asymmetric',\n nearest_mode='round_prefer_ceil'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data, lambda x: nearest_coeffs(x, mode='round_prefer_ceil'),\n output_size=sizes, coordinate_transformation_mode='asymmetric').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric')", + "summary": "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric" + } + ], + "inputs": [ + { + "description": "N-D tensor", + "name": "X", + "type": "T" + }, + { + "description": "The scale array along each dimension. It takes value greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. 
The number of elements of 'scales' should be the same as the rank of input 'X'.", + "name": "scales", + "type": "tensor(float)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "N-D tensor after resizing", + "name": "Y", + "type": "T" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Resize", + "schema": { + "attributes": [ + { + "default": "half_pixel", + "description": "\nThis attribute describes how to transform the coordinate in the resized tensor to the coordinate in the original tensor.
    \n\nThe coordinate of each dimension is transformed individually. Let's describe a case using axis x as an example. \nDenote x_resized as the coordinate of axis x in the resized tensor, x_original as the coordinate of axis x in the original tensor, length_original as the length of the original tensor in axis x, length_resized as the length of the resized tensor in axis x, roi_x = (start_x, end_x) of the axis x in input \"roi\", scale = length_resized / length_original,
    \n\nif coordinate_transformation_mode is \"half_pixel\",
    \nx_original = (x_resized + 0.5) / scale - 0.5,
    \n\nif coordinate_transformation_mode is \"pytorch_half_pixel\",
    \nx_original = length_resized > 1 ? (x_resized + 0.5) / scale - 0.5 : 0,
    \n\nif coordinate_transformation_mode is \"align_corners\",
    \nx_original = x_resized * (length_original - 1) / (length_resized - 1),
    \n\nif coordinate_transformation_mode is \"asymmetric\",
    \nx_original = x_resized / scale,
    \n\nif coordinate_transformation_mode is \"tf_half_pixel_for_nn\",
    \nx_original = (x_resized + 0.5) / scale,
    \n\nif coordinate_transformation_mode is \"tf_crop_and_resize\",
    \nx_original = length_resized > 1 ? start_x * (length_original - 1) + x_resized * (end_x - start_x) * (length_original - 1) / (length_resized - 1) : 0.5 * (start_x + end_x) * (length_original - 1).", + "name": "coordinate_transformation_mode", + "required": false, + "type": "string" + }, + { + "default": -0.75, + "description": "The coefficient 'a' used in cubic interpolation. Two common choices are -0.5 (in some cases of TensorFlow) and -0.75 (in PyTorch). Check out Equation (4) in https://ieeexplore.ieee.org/document/1163711 for the details. This attribute is valid only if \"mode\" is \"cubic\".", + "name": "cubic_coeff_a", + "required": false, + "type": "float32" + }, + { + "description": "If set to 1, the weight of sampling locations outside the tensor will be set to 0 and the weight will be renormalized so that their sum is 1.0. The default value is 0.", + "name": "exclude_outside", + "required": false, + "type": "int64" + }, + { + "description": "When coordinate_transformation_mode is \"tf_crop_and_resize\" and x_original is outside the range [0, length_original - 1], this value is used as the corresponding output value. Default is 0.0f.", + "name": "extrapolation_value", + "required": false, + "type": "float32" + }, + { + "default": "nearest", + "description": "Three interpolation modes: nearest (default), linear and cubic. The \"linear\" mode includes linear interpolation for 1D tensor and N-linear interpolation for N-D tensor (for example, bilinear interpolation for 2D tensor). The \"cubic\" mode includes cubic interpolation for 1D tensor and N-cubic interpolation for N-D tensor (for example, bicubic interpolation for 2D tensor).", + "name": "mode", + "required": false, + "type": "string" + }, + { + "default": "round_prefer_floor", + "description": "Four modes: round_prefer_floor (default, also known as round half down), round_prefer_ceil (also known as round half up), floor, ceil. Only used by nearest interpolation. 
It indicates how to get \"nearest\" pixel in input tensor from x_original, so this attribute is valid only if \"mode\" is \"nearest\".", + "name": "nearest_mode", + "required": false, + "type": "string" + } + ], + "description": "Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * (roi_end - roi_start) * scale) if input \\\"sizes\\\" is not specified.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.47119141 2.78125 4.08251953]\n# [ 6.71142578 8.02148438 9.32275391]\n# [11.91650391 13.2265625 14.52783203]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_cubic')", + "summary": "resize_downsample_scales_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n cubic_coeff_a=-0.5,\n exclude_outside=True\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1.36812675 2.6695014 4.0133367 ]\n# [ 6.57362535 7.875 9.2188353 ]\n# [11.94896657 13.25034122 14.59417652]]]]\noutput = interpolate_nd(data, lambda x: cubic_coeffs(x, A=-0.5), scale_factors=scales,\n exclude_outside=True).astype(np.float32)\n\nexpect(node, inputs=[data, roi, 
scales], outputs=[output],\n name='test_resize_downsample_scales_cubic_A_n0p5_exclude_outside')", + "summary": "resize_downsample_scales_cubic_A_n0p5_exclude_outside" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)\n\n# [[[[ 1. 2.39519159 3.79038317]\n# [ 6.58076634 7.97595793 9.37114951]\n# [12.16153268 13.55672427 14.95191585]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_cubic_align_corners')", + "summary": "resize_downsample_scales_cubic_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[2.6666665 4.3333331]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_linear')", + "summary": "resize_downsample_scales_linear" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 
3.142857]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_linear_align_corners')", + "summary": "resize_downsample_scales_linear_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_downsample_scales_nearest')", + "summary": "resize_downsample_scales_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 1.63078704 3.00462963 4.37847222]\n# [ 7.12615741 8.5 9.87384259]\n# [12.62152778 13.99537037 15.36921296]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_cubic')", + "summary": "resize_downsample_sizes_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='pytorch_half_pixel'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = 
np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 1], dtype=np.int64)\n\n# [[[[ 1.6666666]\n# [ 7. ]\n# [12.333333 ]]]]\noutput = interpolate_nd(\n data, linear_coeffs, output_size=sizes, coordinate_transformation_mode='pytorch_half_pixel').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_linear_pytorch_half_pixel')", + "summary": "resize_downsample_sizes_linear_pytorch_half_pixel" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 1, 3], dtype=np.int64)\n\n# [[[[1. 3.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_nearest')", + "summary": "resize_downsample_sizes_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='tf_half_pixel_for_nn'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 2], dtype=np.int64)\n\n# [[[[ 6. 8.]\n# [10. 12.]\n# [14. 
16.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, output_size=sizes, coordinate_transformation_mode='tf_half_pixel_for_nn').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn')", + "summary": "resize_downsample_sizes_nearest_tf_half_pixel_for_nn" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='tf_crop_and_resize'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 0.6, 0.8], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 7.9 8.2 ]\n# [ 8.8 9.1 9.400001 ]\n# [10. 10.3 10.6 ]]]]\noutput = interpolate_nd(data, linear_coeffs, output_size=sizes, roi=roi,\n coordinate_transformation_mode='tf_crop_and_resize').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_tf_crop_and_resize')", + "summary": "resize_tf_crop_and_resize" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='tf_crop_and_resize',\n extrapolation_value=10.0\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\n# Note: for some rois, the result may be different with that of TF for inaccurate floating point\nroi = np.array([0, 0, 0.4, 0.6, 1, 1, 1.2, 1.7], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 3, 3], dtype=np.int64)\n\n# [[[[ 7.6000004 10. 10. ]\n# [12.400001 10. 10. ]\n# [10. 10. 10. 
]]]]\noutput = interpolate_nd(data, linear_coeffs, output_size=sizes, roi=roi,\n coordinate_transformation_mode='tf_crop_and_resize', extrapolation_value=10.0).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_tf_crop_and_resize')", + "summary": "resize_tf_crop_and_resize_extrapolation_value" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.47265625 0.76953125 1.24609375 1.875 2.28125\n# 2.91015625 3.38671875 3.68359375]\n# [ 1.66015625 1.95703125 2.43359375 3.0625 3.46875\n# 4.09765625 4.57421875 4.87109375]\n# [ 3.56640625 3.86328125 4.33984375 4.96875 5.375\n# 6.00390625 6.48046875 6.77734375]\n# [ 6.08203125 6.37890625 6.85546875 7.484375 7.890625\n# 8.51953125 8.99609375 9.29296875]\n# [ 7.70703125 8.00390625 8.48046875 9.109375 9.515625\n# 10.14453125 10.62109375 10.91796875]\n# [10.22265625 10.51953125 10.99609375 11.625 12.03125\n# 12.66015625 13.13671875 13.43359375]\n# [12.12890625 12.42578125 12.90234375 13.53125 13.9375\n# 14.56640625 15.04296875 15.33984375]\n# [13.31640625 13.61328125 14.08984375 14.71875 15.125\n# 15.75390625 16.23046875 16.52734375]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic')", + "summary": "resize_upsample_scales_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n cubic_coeff_a=-0.5,\n exclude_outside=True\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = 
np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 0.55882353 0.81494204 1.35698249 1.89705882 2.39705882\n# 2.93713516 3.47917561 3.73529412]\n# [ 1.58329755 1.83941606 2.38145651 2.92153285 3.42153285\n# 3.96160918 4.50364964 4.75976814]\n# [ 3.75145936 4.00757787 4.54961832 5.08969466 5.58969466\n# 6.12977099 6.67181144 6.92792995]\n# [ 5.91176471 6.16788321 6.70992366 7.25 7.75\n# 8.29007634 8.83211679 9.08823529]\n# [ 7.91176471 8.16788321 8.70992366 9.25 9.75\n# 10.29007634 10.83211679 11.08823529]\n# [10.07207005 10.32818856 10.87022901 11.41030534 11.91030534\n# 12.45038168 12.99242213 13.24854064]\n# [12.24023186 12.49635036 13.03839082 13.57846715 14.07846715\n# 14.61854349 15.16058394 15.41670245]\n# [13.26470588 13.52082439 14.06286484 14.60294118 15.10294118\n# 15.64301751 16.18505796 16.44117647]]]]\noutput = interpolate_nd(data, lambda x: cubic_coeffs(x, A=-0.5), scale_factors=scales,\n exclude_outside=True).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic_A_n0p5_exclude_outside')", + "summary": "resize_upsample_scales_cubic_A_n0p5_exclude_outside" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[ 1. 1.34110787 1.80029155 2.32944606 2.67055394\n# 3.19970845 3.65889213 4. 
]\n# [ 2.36443149 2.70553936 3.16472303 3.69387755 4.03498542\n# 4.56413994 5.02332362 5.36443149]\n# [ 4.20116618 4.54227405 5.00145773 5.53061224 5.87172012\n# 6.40087464 6.86005831 7.20116618]\n# [ 6.31778426 6.65889213 7.1180758 7.64723032 7.98833819\n# 8.51749271 8.97667638 9.31778426]\n# [ 7.68221574 8.02332362 8.48250729 9.01166181 9.35276968\n# 9.8819242 10.34110787 10.68221574]\n# [ 9.79883382 10.13994169 10.59912536 11.12827988 11.46938776\n# 11.99854227 12.45772595 12.79883382]\n# [11.63556851 11.97667638 12.43586006 12.96501458 13.30612245\n# 13.83527697 14.29446064 14.63556851]\n# [13. 13.34110787 13.80029155 14.32944606 14.67055394\n# 15.19970845 15.65889213 16. ]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic_align_corners')", + "summary": "resize_upsample_scales_cubic_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='cubic',\n coordinate_transformation_mode='asymmetric'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\nroi = np.array([], dtype=np.float32)\n\n# [[[[ 1. 1.40625 2. 2.5 3. 3.59375 4.\n# 4.09375]\n# [ 2.625 3.03125 3.625 4.125 4.625 5.21875 5.625\n# 5.71875]\n# [ 5. 5.40625 6. 6.5 7. 7.59375 8.\n# 8.09375]\n# [ 7. 7.40625 8. 8.5 9. 9.59375 10.\n# 10.09375]\n# [ 9. 9.40625 10. 10.5 11. 11.59375 12.\n# 12.09375]\n# [11.375 11.78125 12.375 12.875 13.375 13.96875 14.375\n# 14.46875]\n# [13. 13.40625 14. 14.5 15. 
15.59375 16.\n# 16.09375]\n# [13.375 13.78125 14.375 14.875 15.375 15.96875 16.375\n# 16.46875]]]]\noutput = interpolate_nd(data, lambda x: cubic_coeffs(x, A=-0.75), scale_factors=scales,\n coordinate_transformation_mode='asymmetric').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_cubic_asymmetric')", + "summary": "resize_upsample_scales_cubic_asymmetric" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.25 1.75 2. ]\n# [1.5 1.75 2.25 2.5 ]\n# [2.5 2.75 3.25 3.5 ]\n# [3. 3.25 3.75 4. ]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_linear')", + "summary": "resize_upsample_scales_linear" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='linear',\n coordinate_transformation_mode='align_corners'\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)\n\n# [[[[1. 1.33333333 1.66666667 2. ]\n# [1.66666667 2. 2.33333333 2.66666667]\n# [2.33333333 2.66666667 3. 3.33333333]\n# [3. 3.33333333 3.66666667 4. 
]]]]\noutput = interpolate_nd(\n data, linear_coeffs, scale_factors=scales, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_linear_align_corners')", + "summary": "resize_upsample_scales_linear_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\n# [[[[1. 1. 1. 2. 2. 2.]\n# [1. 1. 1. 2. 2. 2.]\n# [3. 3. 3. 4. 4. 4.]\n# [3. 3. 3. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, scale_factors=scales).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales], outputs=[output],\n name='test_resize_upsample_scales_nearest')", + "summary": "resize_upsample_scales_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='cubic',\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 9, 10], dtype=np.int64)\n\n# [[[[ 0.45507922 0.64057922 0.97157922 1.42257922 1.90732922\n# 2.22332922 2.70807922 3.15907922 3.49007922 3.67557922]\n# [ 1.39437963 1.57987963 1.91087963 2.36187963 2.84662963\n# 3.16262963 3.64737963 4.09837963 4.42937963 4.61487963]\n# [ 2.95130693 3.13680693 3.46780693 3.91880693 4.40355693\n# 4.71955693 5.20430693 5.65530693 5.98630693 6.17180693]\n# [ 5.20525069 5.39075069 5.72175069 6.17275069 6.65750069\n# 6.97350069 7.45825069 7.90925069 8.24025069 8.42575069]\n# [ 6.88975 7.07525 7.40625 7.85725 8.342\n# 8.658 9.14275 9.59375 9.92475 10.11025 ]\n# [ 8.57424931 8.75974931 9.09074931 9.54174931 10.02649931\n# 
10.34249931 10.82724931 11.27824931 11.60924931 11.79474931]\n# [10.82819307 11.01369307 11.34469307 11.79569307 12.28044307\n# 12.59644307 13.08119307 13.53219307 13.86319307 14.04869307]\n# [12.38512037 12.57062037 12.90162037 13.35262037 13.83737037\n# 14.15337037 14.63812037 15.08912037 15.42012037 15.60562037]\n# [13.32442078 13.50992078 13.84092078 14.29192078 14.77667078\n# 15.09267078 15.57742078 16.02842078 16.35942078 16.54492078]]]]\noutput = interpolate_nd(\n data, cubic_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_cubic')", + "summary": "resize_upsample_sizes_cubic" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 7, 8], dtype=np.int64)\n\n# [[[[1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [1. 1. 1. 1. 2. 2. 2. 2.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]\n# [3. 3. 3. 3. 4. 4. 4. 4.]]]]\noutput = interpolate_nd(\n data, nearest_coeffs, output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest')", + "summary": "resize_upsample_sizes_nearest" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='half_pixel',\n nearest_mode='ceil'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 
7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data, lambda x: nearest_coeffs(x, mode='ceil'), output_size=sizes).astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest_ceil_half_pixel')", + "summary": "resize_upsample_sizes_nearest_ceil_half_pixel" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='align_corners',\n nearest_mode='floor'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 1. 1. 1. 2. 2. 3. 3. 4.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 5. 5. 5. 6. 6. 7. 7. 8.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [ 9. 9. 9. 10. 10. 11. 11. 12.]\n# [13. 13. 13. 14. 14. 15. 15. 
16.]]]]\noutput = interpolate_nd(\n data, lambda x: nearest_coeffs(x, mode='floor'), output_size=sizes, coordinate_transformation_mode='align_corners').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest_floor_align_corners')", + "summary": "resize_upsample_sizes_nearest_floor_align_corners" + }, + { + "code": "node = onnx.helper.make_node(\n 'Resize',\n inputs=['X', 'roi', 'scales', 'sizes'],\n outputs=['Y'],\n mode='nearest',\n coordinate_transformation_mode='asymmetric',\n nearest_mode='round_prefer_ceil'\n)\n\ndata = np.array([[[\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n]]], dtype=np.float32)\n\nroi = np.array([], dtype=np.float32)\nscales = np.array([], dtype=np.float32)\nsizes = np.array([1, 1, 8, 8], dtype=np.int64)\n\n# [[[[ 1. 2. 2. 3. 3. 4. 4. 4.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 5. 6. 6. 7. 7. 8. 8. 8.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [ 9. 10. 10. 11. 11. 12. 12. 12.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]\n# [13. 14. 14. 15. 15. 16. 16. 16.]]]]\noutput = interpolate_nd(\n data, lambda x: nearest_coeffs(x, mode='round_prefer_ceil'),\n output_size=sizes, coordinate_transformation_mode='asymmetric').astype(np.float32)\n\nexpect(node, inputs=[data, roi, scales, sizes], outputs=[output],\n name='test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric')", + "summary": "resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric" + } + ], + "inputs": [ + { + "description": "N-D tensor", + "name": "X", + "type": "T1" + }, + { + "description": "1-D tensor given as [start1, ..., startN, end1, ..., endN], where N is the rank of X. The RoIs' coordinates are normalized in the coordinate system of the input image. It only takes effect when coordinate_transformation_mode is \"tf_crop_and_resize\"", + "name": "roi", + "type": "T2" + }, + { + "description": "The scale array along each dimension. 
It takes values greater than 0. If it's less than 1, it's sampling down, otherwise, it's upsampling. The number of elements of 'scales' should be the same as the rank of input 'X'. Only one of 'scales' and 'sizes' can be specified. If 'sizes' is needed, the user can use an empty string as the name of 'scales' in this operator's input list.", + "name": "scales", + "type": "tensor(float)" + }, + { + "description": "The size of the output tensor. The number of elements of 'sizes' should be the same as the rank of input 'X'. Only one of 'scales' and 'sizes' can be specified.", + "name": "sizes", + "option": "optional", + "type": "tensor(int64)" + } + ], + "inputs_range": "3 - 4", + "max_input": 4, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "N-D tensor after resizing", + "name": "Y", + "type": "T1" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain roi type to float or double.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "ReverseSequence", + "schema": { + "attributes": [ + { + "default": 1, + "description": "(Optional) Specify which axis is batch axis. Must be one of 1 (default), or 0.", + "name": "batch_axis", + "required": false, + "type": "int64" + }, + { + "description": "(Optional) Specify which axis is time axis. 
Must be one of 0 (default), or 1.", + "name": "time_axis", + "required": false, + "type": "int64" + } + ], + "description": "Reverse batch of sequences having different lengths specified by `sequence_lens`.\n\nFor each slice i iterating on batch axis, the operator reverses the first sequence_lens[i] elements on time axis,\nand copies elements whose index's beyond sequence_lens[i] to the output. So the output slice i contains reversed\nsequences on the first sequence_lens[i] elements, then have original values copied for the other elements.\n\nExample 1:\n input = [[0.0, 4.0, 8.0, 12.0],\n [1.0, 5.0, 9.0, 13.0],\n [2.0, 6.0, 10.0, 14.0],\n [3.0, 7.0, 11.0, 15.0]]\n sequence_lens = [4, 3, 2, 1]\n time_axis = 0\n batch_axis = 1\n\n output = [[3.0, 6.0, 9.0, 12.0],\n [2.0, 5.0, 8.0, 13.0],\n [1.0, 4.0, 10.0, 14.0],\n [0.0, 7.0, 11.0, 15.0]]\n\nExample 2:\n input = [[0.0, 1.0, 2.0, 3.0 ],\n [4.0, 5.0, 6.0, 7.0 ],\n [8.0, 9.0, 10.0, 11.0],\n [12.0, 13.0, 14.0, 15.0]]\n sequence_lens = [1, 2, 3, 4]\n time_axis = 1\n batch_axis = 0\n\n output = [[0.0, 1.0, 2.0, 3.0 ],\n [5.0, 4.0, 6.0, 7.0 ],\n [10.0, 9.0, 8.0, 11.0],\n [15.0, 14.0, 13.0, 12.0]]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'ReverseSequence',\n inputs=['x', 'sequence_lens'],\n outputs=['y'],\n time_axis=1,\n batch_axis=0,\n)\nx = np.array([[0.0, 1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0, 7.0],\n [8.0, 9.0, 10.0, 11.0],\n [12.0, 13.0, 14.0, 15.0]], dtype=np.float32)\nsequence_lens = np.array([1, 2, 3, 4], dtype=np.int64)\n\ny = np.array([[0.0, 1.0, 2.0, 3.0],\n [5.0, 4.0, 6.0, 7.0],\n [10.0, 9.0, 8.0, 11.0],\n [15.0, 14.0, 13.0, 12.0]], dtype=np.float32)\n\nexpect(node, inputs=[x, sequence_lens], outputs=[y],\n name='test_reversesequence_batch')", + "summary": "reversesequence_batch" + }, + { + "code": "node = onnx.helper.make_node(\n 'ReverseSequence',\n inputs=['x', 'sequence_lens'],\n outputs=['y'],\n time_axis=0,\n batch_axis=1,\n)\nx = np.array([[0.0, 4.0, 8.0, 
12.0],\n [1.0, 5.0, 9.0, 13.0],\n [2.0, 6.0, 10.0, 14.0],\n [3.0, 7.0, 11.0, 15.0]], dtype=np.float32)\nsequence_lens = np.array([4, 3, 2, 1], dtype=np.int64)\n\ny = np.array([[3.0, 6.0, 9.0, 12.0],\n [2.0, 5.0, 8.0, 13.0],\n [1.0, 4.0, 10.0, 14.0],\n [0.0, 7.0, 11.0, 15.0]], dtype=np.float32)\n\nexpect(node, inputs=[x, sequence_lens], outputs=[y],\n name='test_reversesequence_time')", + "summary": "reversesequence_time" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 2.", + "name": "input", + "type": "T" + }, + { + "description": "Tensor specifying lengths of the sequences in a batch. It has shape `[batch_size]`.", + "name": "sequence_lens", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Tensor with same shape of input.", + "name": "Y", + "type": "T" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "RoiAlign", + "schema": { + "attributes": [ + { + "default": "avg", + "description": "The pooling method. Two modes are supported: 'avg' and 'max'. 
Default is 'avg'.", + "name": "mode", + "required": false, + "type": "string" + }, + { + "default": 1, + "description": "default 1; Pooled output Y's height.", + "name": "output_height", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "default 1; Pooled output Y's width.", + "name": "output_width", + "required": false, + "type": "int64" + }, + { + "description": "Number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If == 0, then an adaptive number of grid points are used (computed as ceil(roi_width / output_width), and likewise for height). Default is 0.", + "name": "sampling_ratio", + "required": false, + "type": "int64" + }, + { + "default": 1.0, + "description": "Multiplicative spatial scale factor to translate ROI coordinates from their input spatial scale to the scale used when pooling, i.e., spatial scale of the input feature map X relative to the input image. E.g.; default is 1.0f. 
", + "name": "spatial_scale", + "required": false, + "type": "float32" + } + ], + "description": "Region of Interest (RoI) align operation described in the\n[Mask R-CNN paper](https://arxiv.org/abs/1703.06870).\nRoiAlign consumes an input tensor X and region of interests (rois)\nto apply pooling across each RoI; it produces a 4-D tensor of shape\n(num_rois, C, output_height, output_width).\n\nRoiAlign is proposed to avoid the misalignment by removing\nquantizations while converting from original image into feature\nmap and from feature map into RoI feature; in each ROI bin,\nthe value of the sampled locations are computed directly\nthrough bilinear interpolation.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indices\"],\n outputs=[\"Y\"],\n spatial_scale=1.0,\n output_height=5,\n output_width=5,\n sampling_ratio=2,\n)\n\nX = np.array(\n [\n [\n [\n [\n 0.2764,\n 0.7150,\n 0.1958,\n 0.3416,\n 0.4638,\n 0.0259,\n 0.2963,\n 0.6518,\n 0.4856,\n 0.7250,\n ],\n [\n 0.9637,\n 0.0895,\n 0.2919,\n 0.6753,\n 0.0234,\n 0.6132,\n 0.8085,\n 0.5324,\n 0.8992,\n 0.4467,\n ],\n [\n 0.3265,\n 0.8479,\n 0.9698,\n 0.2471,\n 0.9336,\n 0.1878,\n 0.4766,\n 0.4308,\n 0.3400,\n 0.2162,\n ],\n [\n 0.0206,\n 0.1720,\n 0.2155,\n 0.4394,\n 0.0653,\n 0.3406,\n 0.7724,\n 0.3921,\n 0.2541,\n 0.5799,\n ],\n [\n 0.4062,\n 0.2194,\n 0.4473,\n 0.4687,\n 0.7109,\n 0.9327,\n 0.9815,\n 0.6320,\n 0.1728,\n 0.6119,\n ],\n [\n 0.3097,\n 0.1283,\n 0.4984,\n 0.5068,\n 0.4279,\n 0.0173,\n 0.4388,\n 0.0430,\n 0.4671,\n 0.7119,\n ],\n [\n 0.1011,\n 0.8477,\n 0.4726,\n 0.1777,\n 0.9923,\n 0.4042,\n 0.1869,\n 0.7795,\n 0.9946,\n 0.9689,\n ],\n [\n 0.1366,\n 0.3671,\n 0.7011,\n 0.6234,\n 0.9867,\n 0.5585,\n 0.6985,\n 0.5609,\n 0.8788,\n 0.9928,\n ],\n [\n 0.5697,\n 0.8511,\n 0.6711,\n 0.9406,\n 0.8751,\n 0.7496,\n 0.1650,\n 0.1049,\n 0.1559,\n 0.2514,\n ],\n [\n 0.7012,\n 0.4056,\n 0.7879,\n 0.3461,\n 0.0415,\n 0.2998,\n 
0.5094,\n 0.3727,\n 0.5482,\n 0.0502,\n ],\n ]\n ]\n ],\n dtype=np.float32,\n)\nbatch_indices = np.array([0, 0, 0], dtype=np.int64)\nrois = np.array([[0, 0, 9, 9], [0, 5, 4, 9], [5, 5, 9, 9]], dtype=np.float32)\n# (num_rois, C, output_height, output_width)\nY = np.array(\n [\n [\n [\n [0.4664, 0.4466, 0.3405, 0.5688, 0.6068],\n [0.3714, 0.4296, 0.3835, 0.5562, 0.3510],\n [0.2768, 0.4883, 0.5222, 0.5528, 0.4171],\n [0.4713, 0.4844, 0.6904, 0.4920, 0.8774],\n [0.6239, 0.7125, 0.6289, 0.3355, 0.3495],\n ]\n ],\n [\n [\n [0.3022, 0.4305, 0.4696, 0.3978, 0.5423],\n [0.3656, 0.7050, 0.5165, 0.3172, 0.7015],\n [0.2912, 0.5059, 0.6476, 0.6235, 0.8299],\n [0.5916, 0.7389, 0.7048, 0.8372, 0.8893],\n [0.6227, 0.6153, 0.7097, 0.6154, 0.4585],\n ]\n ],\n [\n [\n [0.2384, 0.3379, 0.3717, 0.6100, 0.7601],\n [0.3767, 0.3785, 0.7147, 0.9243, 0.9727],\n [0.5749, 0.5826, 0.5709, 0.7619, 0.8770],\n [0.5355, 0.2566, 0.2141, 0.2796, 0.3600],\n [0.4365, 0.3504, 0.2887, 0.3661, 0.2349],\n ]\n ],\n ],\n dtype=np.float32,\n)\n\nexpect(node, inputs=[X, rois, batch_indices], outputs=[Y], name=\"test_roialign\")", + "summary": "roialign" + } + ], + "inputs": [ + { + "description": "Input data tensor from the previous operator; 4-D feature map of shape (N, C, H, W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.", + "name": "X", + "type": "T1" + }, + { + "description": "RoIs (Regions of Interest) to pool over; rois is 2-D input of shape (num_rois, 4) given as [[x1, y1, x2, y2], ...]. The RoIs' coordinates are in the coordinate system of the input image. 
Each coordinate set has a 1:1 correspondence with the 'batch_indices' input.", + "name": "rois", + "type": "T1" + }, + { + "description": "1-D tensor of shape (num_rois,) with each element denoting the index of the corresponding image in the batch.", + "name": "batch_indices", + "type": "T2" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "RoI pooled output, 4-D tensor of shape (num_rois, C, output_height, output_width). The r-th batch element Y[r-1] is a pooled feature map corresponding to the r-th RoI X[r-1].", + "name": "Y", + "type": "T1" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain types to float tensors.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain types to int tensors.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "Round", + "schema": { + "description": "Round takes one input Tensor and rounds the values, element-wise, meaning\nit finds the nearest integer for each value.\nIn case of halfs, the rule is to round them to the nearest even integer.\nThe output tensor has the same shape and type as the input.\n\nExamples:\n```\nround([0.9]) = [1.0]\nround([2.5]) = [2.0]\nround([2.3]) = [2.0]\nround([1.5]) = [2.0]\nround([-4.5]) = [-4.0]\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Round',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([0.1, 0.5, 0.9, 1.2, 1.5,\n 1.8, 2.3, 2.5, 2.7, -1.1,\n -1.5, -1.9, -2.2, -2.5, -2.8]).astype(np.float32)\ny = np.array([0., 0., 1., 1., 2.,\n 2., 2., 2., 3., -1.,\n -2., -2., -2., -2., -3.]).astype(np.float32) # expected output\nexpect(node, inputs=[x], outputs=[y],\n name='test_round')", + "summary": "round" + } + ], + "inputs": [ + { + "description": "Input tensor", + 
"name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "SVMClassifier", + "schema": { + "attributes": [ + { + "description": "Class labels if using integer labels.
    One and only one of the 'classlabels_*' attributes must be defined.", + "name": "classlabels_ints", + "required": false, + "type": "int64[]" + }, + { + "description": "Class labels if using string labels.
    One and only one of the 'classlabels_*' attributes must be defined.", + "name": "classlabels_strings", + "required": false, + "type": "string[]" + }, + { + "description": "", + "name": "coefficients", + "required": false, + "type": "float32[]" + }, + { + "description": "List of 3 elements containing gamma, coef0, and degree, in that order. Zero if unused for the kernel.", + "name": "kernel_params", + "required": false, + "type": "float32[]" + }, + { + "default": "LINEAR", + "description": "The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'.", + "name": "kernel_type", + "required": false, + "type": "string" + }, + { + "default": "NONE", + "description": "Indicates the transform to apply to the score.
    One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'", + "name": "post_transform", + "required": false, + "type": "string" + }, + { + "description": "First set of probability coefficients.", + "name": "prob_a", + "required": false, + "type": "float32[]" + }, + { + "description": "Second set of probability coefficients. This array must be same size as prob_a.
    If these are provided then output Z are probability estimates, otherwise they are raw scores.", + "name": "prob_b", + "required": false, + "type": "float32[]" + }, + { + "description": "", + "name": "rho", + "required": false, + "type": "float32[]" + }, + { + "description": "", + "name": "support_vectors", + "required": false, + "type": "float32[]" + }, + { + "description": "", + "name": "vectors_per_class", + "required": false, + "type": "int64[]" + } + ], + "description": "Support Vector Machine classifier\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be classified.", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 2, + "outputs": [ + { + "description": "Classification outputs (one class per example).", + "name": "Y", + "type": "T2" + }, + { + "description": "Class scores (one per class per example), if prob_a and prob_b are provided they are probabilities for each class, otherwise they are raw scores.", + "name": "Z", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input must be a tensor of a numeric type, either [C] or [N,C].", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ], + "description": "The output type will be a tensor of strings or integers, depending on which of the the classlabels_* attributes is used. Its size will match the bactch size of the input.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "SVMRegressor", + "schema": { + "attributes": [ + { + "description": "Support vector coefficients.", + "name": "coefficients", + "required": false, + "type": "float32[]" + }, + { + "description": "List of 3 elements containing gamma, coef0, and degree, in that order. 
Zero if unused for the kernel.", + "name": "kernel_params", + "required": false, + "type": "float32[]" + }, + { + "default": "LINEAR", + "description": "The kernel type, one of 'LINEAR,' 'POLY,' 'RBF,' 'SIGMOID'.", + "name": "kernel_type", + "required": false, + "type": "string" + }, + { + "description": "The number of support vectors.", + "name": "n_supports", + "required": false, + "type": "int64" + }, + { + "description": "Flag indicating whether the regression is a one-class SVM or not.", + "name": "one_class", + "required": false, + "type": "int64" + }, + { + "default": "NONE", + "description": "Indicates the transform to apply to the score.
    One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.'", + "name": "post_transform", + "required": false, + "type": "string" + }, + { + "description": "", + "name": "rho", + "required": false, + "type": "float32[]" + }, + { + "description": "Chosen support vectors", + "name": "support_vectors", + "required": false, + "type": "float32[]" + } + ], + "description": "Support Vector Machine regression prediction and one-class SVM anomaly detection.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be regressed.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Regression outputs (one score per target per example).", + "name": "Y", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input type must be a tensor of a numeric type, either [C] or [N,C].", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Scaler", + "schema": { + "attributes": [ + { + "description": "First, offset by this.
    Can be length of features in an [N,F] tensor or length 1, in which case it applies to all features, regardless of dimension count.", + "name": "offset", + "required": false, + "type": "float32[]" + }, + { + "description": "Second, multiply by this.
    Can be length of features in an [N,F] tensor or length 1, in which case it applies to all features, regardless of dimension count.
    Must be same length as 'offset'", + "name": "scale", + "required": false, + "type": "float32[]" + } + ], + "description": "Rescale input data, for example to standardize features by removing the mean and scaling to unit variance.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Data to be scaled.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Scaled output data.", + "name": "Y", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input must be a tensor of a numeric type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Scan", + "schema": { + "attributes": [ + { + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations.", + "name": "body", + "required": true, + "type": "graph" + }, + { + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction.", + "name": "directions", + "required": false, + "type": "int64[]" + }, + { + "description": "An attribute specifying the number of scan_inputs M. 
", + "name": "num_scan_inputs", + "required": true, + "type": "int64" + } + ], + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops). All these tensors are required to\nhave the same shape in each iteration of the loop (a restriction imposed to enable efficient\nmemory allocation). Many common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs).\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe operation supports batching, and the batch-axis is required to be 0.\nWhen multiple scan_input tensors are used, they must all have the same batch-size,\nand they must all have the same maximum-sequence-length (the dimensionality of the\nsequence axis or scan axis). 
The sequence axis or scan axis is required to be 1.\n\nThe operation has an optional sequence_lens input (of shape [BATCH_SIZE]) to\nallow variable length sequences of length <= the maximum-sequence-length. If this\ninput is not specified, all sequences are assumed to be of length equal to\nmaximum-sequence-length. For variable length input sequences, the scan_outputs\nwill consist of a sequence of same length as the input, padded to the\nmaximum-sequence-length.\n\nThe optional attribute directions can be used to scan a sequence in the reverse direction.\nIf this attribute is omitted, all sequences are scanned in the forward direction.\nA bidirectional scan be performed by specifying the same tensor input twice in the\nscan_inputs, once with a forward direction, and once with a backward direction.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body\n > (sequence_lengths, init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // T.shape[0] denotes the batch-size of T\n // The batch-size of scan_1, ..., scan_m are all required to be equal\n batch_size = scan_1.shape[0];\n\n // scan_i.shape[1] denotes the (max) sequence-length of scan_i\n // scan_i.shape[1] is required to be equal to scan_j.shape[1] for all i,j.\n max_sequence_length = scan_1.shape[1];\n\n for (int batch = 0; batch < batch_size; ++batch) {\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n N = (sequence_lengths specified) ? 
sequence_lengths[batch] : max_sequence_length;\n\n // execute loop\n for (int t = 0; t < N; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = (scan_1[batch])[t];\n ... ;\n si_m = (scan_m[batch])[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n // accumulate the outputs for this batch:\n bst_1[batch] = st_1; ..., bst_n[batch] = st_n;\n // Note scan-outputs will have size max_sequence_length, but only first N values will be meaningful.\n // The remaining values have an undefined value.\n b_scan_out_1[batch] = scan_out_1; ...; b_scan_out_k[batch] = scan_out_k;\n }\n return bst_1, ..., bst_n, b_scan_out_1, ..., b_scan_out_k;\n\n\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ... 
\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](\"\", %H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n \n", + "domain": "ai.onnx", + "examples": [ + { + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info('sum_in', onnx.TensorProto.FLOAT, [2])\nnext = onnx.helper.make_tensor_value_info('next', onnx.TensorProto.FLOAT, [2])\nsum_out = onnx.helper.make_tensor_value_info('sum_out', onnx.TensorProto.FLOAT, [2])\nscan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [2])\nadd_node = onnx.helper.make_node(\n 'Add',\n inputs=['sum_in', 'next'],\n outputs=['sum_out']\n)\nid_node = onnx.helper.make_node(\n 'Identity',\n inputs=['sum_out'],\n outputs=['scan_out']\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node],\n 'scan_body',\n [sum_in, next],\n [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = '' # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Scan',\n inputs=[no_sequence_lens, 'initial', 'x'],\n outputs=['y', 'z'],\n num_scan_inputs=1,\n body=scan_body\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(node, 
inputs=[initial, x], outputs=[y, z],\n name='test_scan_sum', opset_imports=[onnx.helper.make_opsetid(\"\", 8)])", + "summary": "scan_8" + }, + { + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info('sum_in', onnx.TensorProto.FLOAT, [2])\nnext = onnx.helper.make_tensor_value_info('next', onnx.TensorProto.FLOAT, [2])\nsum_out = onnx.helper.make_tensor_value_info('sum_out', onnx.TensorProto.FLOAT, [2])\nscan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [2])\nadd_node = onnx.helper.make_node(\n 'Add',\n inputs=['sum_in', 'next'],\n outputs=['sum_out']\n)\nid_node = onnx.helper.make_node(\n 'Identity',\n inputs=['sum_out'],\n outputs=['scan_out']\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node],\n 'scan_body',\n [sum_in, next],\n [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n 'Scan',\n inputs=['initial', 'x'],\n outputs=['y', 'z'],\n num_scan_inputs=1,\n body=scan_body\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(node, inputs=[initial, x], outputs=[y, z],\n name='test_scan9_sum', opset_imports=[onnx.helper.make_opsetid(\"\", 9)])", + "summary": "scan_9" + } + ], + "inputs": [ + { + "description": "Optional tensor specifying lengths of the sequences in a batch. 
If this input is not specified, all sequences are assumed to be of the maximum sequence length (the dimension of the sequence axis of the scan_input tensors).", + "name": "sequence_lens", + "option": "optional", + "type": "I" + }, + { + "description": "Initial values of the loop's N state variables followed by M scan_inputs", + "name": "initial_state_and_scan_inputs", + "option": "variadic", + "type": "V" + } + ], + "inputs_range": "2 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Final values of the loop's N state variables followed by K scan_outputs", + "name": "final_state_and_scan_outputs", + "option": "variadic", + "type": "V" + } + ], + "outputs_range": "1 - ∞", + "since_version": 8, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Int64 tensor", + "type_param_str": "I" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "All Tensor types", + "type_param_str": "V" + } + ] + } + }, + { + "name": "Scan", + "schema": { + "attributes": [ + { + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations.", + "name": "body", + "required": true, + "type": "graph" + }, + { + "description": "An attribute specifying the number of scan_inputs M. 
", + "name": "num_scan_inputs", + "required": true, + "type": "int64" + }, + { + "description": "An optional list of M flags. The i-th element of the list specifies the axis to be scanned (the sequence axis) for the i-th scan_input. If omitted, 0 will be used as the scan axis for every scan_input.", + "name": "scan_input_axes", + "required": false, + "type": "int64[]" + }, + { + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction.", + "name": "scan_input_directions", + "required": false, + "type": "int64[]" + }, + { + "description": "An optional list of K flags. The i-th element of the list specifies the axis for the i-th scan_output. The scan outputs are accumulated along the specified axis. If omitted, 0 will be used as the scan axis for every scan_output.", + "name": "scan_output_axes", + "required": false, + "type": "int64[]" + }, + { + "description": "An optional list of K flags, one for each scan_output. The i-th element of the list specifies whether the i-th scan_output should be constructed by appending or prepending a new value in each iteration: 0 indicates appending and 1 indicates prepending. If omitted, all scan_output tensors will be produced by appending a value in each iteration.", + "name": "scan_output_directions", + "required": false, + "type": "int64[]" + } + ], + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. 
It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops).\nMany common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs). All the output tensors (state_variables as\nwell as scan_output_element tensors) are required to have the same shape in each iteration\nof the loop (a restriction imposed to enable efficient memory allocation).\n\nNote that the iterated element passed to the body subgraph does not have a sequence\naxis. It will have a rank one less than the rank of the corresponding scan_input.\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe optional attribute scan_input_directions specifies the direction (forward or backward)\nfor each scan input. If this attribute is omitted, all sequences are scanned in the forward\ndirection. 
A bidirectional scan may be performed by specifying the same tensor input twice\nin the scan_inputs, once with a forward direction, and once with a backward direction.\n\nThe scan_output of the operation is produced by concatenating the scan_output_element\nvalues produced by the body in each iteration. The optional attribute scan_output_directions\nspecifies the direction in which scan_output is constructed (by appending or prepending the\nscan_output_element to scan_output in each iteration) for each scan_output. If this attribute\nis omitted, the scan_output_element is appended to the scan_output in each iteration.\n\nThe optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.\nIf omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the\nbatch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.\nNote that scanning a non-zero axis may be less efficient than scanning axis zero.\n\nThe optional attribute scan_output_axes specifies the axis along which the scan_outputs\nare accumulated for each scan_output. 
For example, if axis 1 is the time axis (to be\nscanned) for both inputs and outputs, specify a scan_input axis and scan_output axis\nvalue of 1.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body,\n scan_input_axes = [axis_1, ..., axis_m]\n > (init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i\n // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.\n sequence_length = scan_1.shape[axis_1];\n\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n\n // execute loop\n for (int t = 0; t < sequence_length; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = scan_1[t];\n ... ;\n si_m = scan_m[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n\n return st_1, ..., st_n, scan_out_1, ..., scan_out_k;\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. 
Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ...\n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info('sum_in', onnx.TensorProto.FLOAT, [2])\nnext = onnx.helper.make_tensor_value_info('next', onnx.TensorProto.FLOAT, [2])\nsum_out = onnx.helper.make_tensor_value_info('sum_out', onnx.TensorProto.FLOAT, [2])\nscan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [2])\nadd_node = onnx.helper.make_node(\n 'Add',\n inputs=['sum_in', 'next'],\n outputs=['sum_out']\n)\nid_node = onnx.helper.make_node(\n 'Identity',\n inputs=['sum_out'],\n outputs=['scan_out']\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node],\n 'scan_body',\n [sum_in, next],\n [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = '' # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Scan',\n inputs=[no_sequence_lens, 'initial', 'x'],\n outputs=['y', 'z'],\n num_scan_inputs=1,\n body=scan_body\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = 
np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(node, inputs=[initial, x], outputs=[y, z],\n name='test_scan_sum', opset_imports=[onnx.helper.make_opsetid(\"\", 8)])", + "summary": "scan_8" + }, + { + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info('sum_in', onnx.TensorProto.FLOAT, [2])\nnext = onnx.helper.make_tensor_value_info('next', onnx.TensorProto.FLOAT, [2])\nsum_out = onnx.helper.make_tensor_value_info('sum_out', onnx.TensorProto.FLOAT, [2])\nscan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [2])\nadd_node = onnx.helper.make_node(\n 'Add',\n inputs=['sum_in', 'next'],\n outputs=['sum_out']\n)\nid_node = onnx.helper.make_node(\n 'Identity',\n inputs=['sum_out'],\n outputs=['scan_out']\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node],\n 'scan_body',\n [sum_in, next],\n [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n 'Scan',\n inputs=['initial', 'x'],\n outputs=['y', 'z'],\n num_scan_inputs=1,\n body=scan_body\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(node, inputs=[initial, x], outputs=[y, z],\n name='test_scan9_sum', opset_imports=[onnx.helper.make_opsetid(\"\", 9)])", + "summary": 
"scan_9" + } + ], + "inputs": [ + { + "description": "Initial values of the loop's N state variables followed by M scan_inputs", + "name": "initial_state_and_scan_inputs", + "option": "variadic", + "type": "V" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Final values of the loop's N state variables followed by K scan_outputs", + "name": "final_state_and_scan_outputs", + "option": "variadic", + "type": "V" + } + ], + "outputs_range": "1 - ∞", + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Int64 tensor", + "type_param_str": "I" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "All Tensor types", + "type_param_str": "V" + } + ] + } + }, + { + "name": "Scan", + "schema": { + "attributes": [ + { + "description": "The graph run each iteration. It has N+M inputs: (loop state variables..., scan_input_elts...). It has N+K outputs: (loop state variables..., scan_output_elts...). Each scan_output is created by concatenating the value of the specified scan_output_elt value at the end of each iteration of the loop. It is an error if the dimensions of these values change across loop iterations.", + "name": "body", + "required": true, + "type": "graph" + }, + { + "description": "An attribute specifying the number of scan_inputs M. ", + "name": "num_scan_inputs", + "required": true, + "type": "int64" + }, + { + "description": "An optional list of M flags. The i-th element of the list specifies the axis to be scanned (the sequence axis) for the i-th scan_input. 
If omitted, 0 will be used as the scan axis for every scan_input. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).", + "name": "scan_input_axes", + "required": false, + "type": "int64[]" + }, + { + "description": "An optional list of M flags. The i-th element of the list specifies the direction to be scanned for the i-th scan_input tensor: 0 indicates forward direction and 1 indicates reverse direction. If omitted, all scan_input tensors will be scanned in the forward direction.", + "name": "scan_input_directions", + "required": false, + "type": "int64[]" + }, + { + "description": "An optional list of K flags. The i-th element of the list specifies the axis for the i-th scan_output. The scan outputs are accumulated along the specified axis. If omitted, 0 will be used as the scan axis for every scan_output. Negative value for an axis means counting dimensions from the back. Accepted range is [-r, r-1].", + "name": "scan_output_axes", + "required": false, + "type": "int64[]" + }, + { + "description": "An optional list of K flags, one for each scan_output. The i-th element of the list specifies whether the i-th scan_output should be constructed by appending or prepending a new value in each iteration: 0 indicates appending and 1 indicates prepending. If omitted, all scan_output tensors will be produced by appending a value in each iteration.", + "name": "scan_output_directions", + "required": false, + "type": "int64[]" + } + ], + "description": "Scan can be used to iterate over one or more scan_input tensors,\nconstructing zero or more scan_output tensors. 
It combines ideas from general recurrences,\nfunctional programming constructs such as scan, fold, map, and zip and is intended to enable\ngeneralizations of RNN-like constructs for sequence-to-sequence processing.\nOther tensors (referred to as state_variables here) can be used to carry a state\nwhen iterating from one element to another (similar to hidden-state in RNNs, also referred\nto as loop-carried dependences in the context of loops).\nMany common usages involve a single scan_input tensor (where functionality\nsimilar to scan, fold and map can be obtained). When more than one scan_input is used,\na behavior similar to zip is obtained.\n\nThe attribute body must be a graph, specifying the computation to be performed in\nevery iteration. It takes as input the current values of the state_variables and\nthe current iterated element of the scan_inputs. It must return the (updated) values\nof the state_variables and zero or more scan_output_element tensors. The values of the\nscan_output_element tensors are concatenated over all the iterations to produce the\nscan_output values of the scan construct (similar to the concatenated intermediate\nhidden-state values of RNN-like constructs). All the output tensors (state_variables as\nwell as scan_output_element tensors) are required to have the same shape in each iteration\nof the loop (a restriction imposed to enable efficient memory allocation).\n\nNote that the iterated element passed to the body subgraph does not have a sequence\naxis. It will have a rank one less than the rank of the corresponding scan_input.\n\nThe scan operation returns the final values of the state_variables as well as the\nscan_outputs.\n\nThe optional attribute scan_input_directions specifies the direction (forward or backward)\nfor each scan input. If this attribute is omitted, all sequences are scanned in the forward\ndirection. 
A bidirectional scan may be performed by specifying the same tensor input twice\nin the scan_inputs, once with a forward direction, and once with a backward direction.\n\nThe scan_output of the operation is produced by concatenating the scan_output_element\nvalues produced by the body in each iteration. The optional attribute scan_output_directions\nspecifies the direction in which scan_output is constructed (by appending or prepending the\nscan_output_element to scan_output in each iteration) for each scan_output. If this attribute\nis omitted, the scan_output_element is appended to the scan_output in each iteration.\n\nThe optional attribute scan_input_axes specifies the axis to be scanned for each scan_input.\nIf omitted, every scan_input will be scanned in axis 0. For example, if axis 0 is the\nbatch axis and axis 1 is the time axis (to be scanned), specify an axis value of 1.\nNote that scanning a non-zero axis may be less efficient than scanning axis zero.\n\nThe optional attribute scan_output_axes specifies the axis along which the scan_outputs\nare accumulated for each scan_output. 
For example, if axis 1 is the time axis (to be\nscanned) for both inputs and outputs, specify a scan_input axis and scan_output axis\nvalue of 1.\n\nNote that because of the ONNX restriction that only the last parameter of an operator can\nbe variadic, the initial-states and scan-inputs are listed together as one input parameter.\nSimilarly, the final-states and scan-outputs are listed together as one output parameter.\nThe attribute num_scan_inputs indicates the number M of scan-inputs.\n\nThe behavior of\n\n Scan <\n num_scan_inputs = m,\n body = loop-body,\n scan_input_axes = [axis_1, ..., axis_m]\n > (init_1, ..., init_n, scan_1, ..., scan_m)\n\nis equivalent to the following pseudo-code:\n\n // scan_i.shape[axis_i] denotes the (max) sequence-length of scan_i\n // scan_i.shape[axis_i] is required to be equal to scan_j.shape[axis_j] for all i,j.\n sequence_length = scan_1.shape[axis_1];\n\n // initialize state-variables\n st_1 = init_1; ... st_n = init_n;\n // initialize scan-output variables: [] denotes an empty tensor\n scan_out_1 = []; ...; scan_out_k = [];\n // identify number of iterations:\n\n // execute loop\n for (int t = 0; t < sequence_length; ++t) {\n // generate the scan-input elements: the notation T[t] indicates the sub-tensor\n // of rank one less than T obtained by indexing T at position t along axis k.\n si_1 = scan_1[t];\n ... ;\n si_m = scan_m[t];\n // execute loop-body\n st_1, ..., st_n, so_1, ..., so_k = loop-body(st_1, ..., st_n, si_1, ..., si_m)\n // accumulate the scan-output elements\n scan_out_1 = Concat(scan_out_1, so_1); ... ; scan_out_k = Concat(scan_out_k, so_k);\n }\n\n return st_1, ..., st_n, scan_out_1, ..., scan_out_k;\n\n*Sample usage: Encoding RNN using a Scan*\n\nThe following example shows how a simple RNN over an input tensor %X, with weight tensor %Wi,\nrecurrence weight tensor %Ri, bias tensors %Wbi and %Rbi, and initial hidden-state %H_0 can\nbe encoded as a ScanLoop. 
Note that the loop-body is a nested graph, and it directly computes\n%Wi, %Ri, %Wbi, and %Rbi (typically constants or initializers in the body graph). If these\nvalues are computed in the outer graph, they need to be passed in as extra state_variables.\n\n graph rnn-encoding {\n %H_0 = ... \n %X = ...\n %Y_h, %Y = Scan[body = , num_scan_inputs=1](%H_0, %X)\n return %Y, %Y_h\n }\n\n graph rnn-cell-1 (\n %H_tminus1[FLOAT, tensor]\n %X_t[FLOAT, tensor]\n ) {\n %Wi = ...\n %Ri = ...\n %Wbi = ...\n %Rbi = ...\n %t1 = X_t * (Wi^T)\n %t2 = H_tminus1*(Ri^T)\n %t3 = Add(%t1, %t2)\n %t4 = Add(%t3, %Wbi)\n %t5 = Add(%t4, %Rbi)\n %Ht = Tanh(%t5)\n %Accumulate = Identity(%Ht)\n return %Ht, %Accumulate\n }\n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info('sum_in', onnx.TensorProto.FLOAT, [2])\nnext = onnx.helper.make_tensor_value_info('next', onnx.TensorProto.FLOAT, [2])\nsum_out = onnx.helper.make_tensor_value_info('sum_out', onnx.TensorProto.FLOAT, [2])\nscan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [2])\nadd_node = onnx.helper.make_node(\n 'Add',\n inputs=['sum_in', 'next'],\n outputs=['sum_out']\n)\nid_node = onnx.helper.make_node(\n 'Identity',\n inputs=['sum_out'],\n outputs=['scan_out']\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node],\n 'scan_body',\n [sum_in, next],\n [sum_out, scan_out]\n)\n# create scan op node\nno_sequence_lens = '' # optional input, not supplied\nnode = onnx.helper.make_node(\n 'Scan',\n inputs=[no_sequence_lens, 'initial', 'x'],\n outputs=['y', 'z'],\n num_scan_inputs=1,\n body=scan_body\n)\n# create inputs for batch-size 1, sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((1, 2))\nx = 
np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((1, 3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((1, 2))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((1, 3, 2))\n\nexpect(node, inputs=[initial, x], outputs=[y, z],\n name='test_scan_sum', opset_imports=[onnx.helper.make_opsetid(\"\", 8)])", + "summary": "scan_8" + }, + { + "code": "# Given an input sequence [x1, ..., xN], sum up its elements using a scan\n# returning the final state (x1+x2+...+xN) as well the scan_output\n# [x1, x1+x2, ..., x1+x2+...+xN]\n#\n# create graph to represent scan body\nsum_in = onnx.helper.make_tensor_value_info('sum_in', onnx.TensorProto.FLOAT, [2])\nnext = onnx.helper.make_tensor_value_info('next', onnx.TensorProto.FLOAT, [2])\nsum_out = onnx.helper.make_tensor_value_info('sum_out', onnx.TensorProto.FLOAT, [2])\nscan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [2])\nadd_node = onnx.helper.make_node(\n 'Add',\n inputs=['sum_in', 'next'],\n outputs=['sum_out']\n)\nid_node = onnx.helper.make_node(\n 'Identity',\n inputs=['sum_out'],\n outputs=['scan_out']\n)\nscan_body = onnx.helper.make_graph(\n [add_node, id_node],\n 'scan_body',\n [sum_in, next],\n [sum_out, scan_out]\n)\n# create scan op node\nnode = onnx.helper.make_node(\n 'Scan',\n inputs=['initial', 'x'],\n outputs=['y', 'z'],\n num_scan_inputs=1,\n body=scan_body\n)\n# create inputs for sequence-length 3, inner dimension 2\ninitial = np.array([0, 0]).astype(np.float32).reshape((2,))\nx = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))\n# final state computed = [1 + 3 + 5, 2 + 4 + 6]\ny = np.array([9, 12]).astype(np.float32).reshape((2,))\n# scan-output computed\nz = np.array([1, 2, 4, 6, 9, 12]).astype(np.float32).reshape((3, 2))\n\nexpect(node, inputs=[initial, x], outputs=[y, z],\n name='test_scan9_sum', opset_imports=[onnx.helper.make_opsetid(\"\", 9)])", + "summary": 
"scan_9" + } + ], + "inputs": [ + { + "description": "Initial values of the loop's N state variables followed by M scan_inputs", + "name": "initial_state_and_scan_inputs", + "option": "variadic", + "type": "V" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Final values of the loop's N state variables followed by K scan_outputs", + "name": "final_state_and_scan_outputs", + "option": "variadic", + "type": "V" + } + ], + "outputs_range": "1 - ∞", + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Int64 tensor", + "type_param_str": "I" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "All Tensor types", + "type_param_str": "V" + } + ] + } + }, + { + "name": "Scatter", + "schema": { + "attributes": [ + { + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1]", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "Given `data`, `updates` and `indices` input tensors of rank r >= 1, write the values provided by `updates` \ninto the first input, `data`, along `axis` dimension of `data` (by default outer-most one as axis=0) at corresponding `indices`. \nFor each entry in `updates`, the target index in `data` is specified by corresponding entry in `indices`\nfor dimension = axis, and index in source for dimension != axis. 
For instance, in a 2-D tensor case,\ndata[indices[i][j]][j] = updates[i][j] if axis = 0, or data[i][indices[i][j]] = updates[i][j] if axis = 1,\nwhere i and j are loop counters from 0 up to the respective size in `updates` - 1.\nExample 1:\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\nExample 2:\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axis = 1\nnode = onnx.helper.make_node(\n 'Scatter',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter(data, indices, updates, axis=axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(node, inputs=[data, indices, updates], outputs=[y],\n name='test_scatter_with_axis', opset_imports=[helper.make_opsetid(\"\", 10)])", + "summary": "scatter_with_axis" + }, + { + "code": "node = onnx.helper.make_node(\n 'Scatter',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(node, inputs=[data, indices, updates], outputs=[y],\n name='test_scatter_without_axis', opset_imports=[helper.make_opsetid(\"\", 10)])", + "summary": "scatter_without_axis" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of 
int32/int64 indices, of r >= 1 (same rank as input).", + "name": "indices", + "type": "Tind" + }, + { + "description": "Tensor of rank r >=1 (same rank and shape as indices)", + "name": "updates", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank r >= 1 (same rank as input).", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "Scatter", + "schema": { + "attributes": [ + { + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "This operator is deprecated. Please use ScatterElements, which provides the same functionality.\n\nScatter takes three inputs `data`, `updates`, and `indices` of the same\nrank r >= 1 and an optional attribute axis that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value\nto values specified by `updates` at specific index positions specified by\n`indices`. 
Its output shape is the same as the shape of `data`.\n\nFor each entry in `updates`, the target index in `data` is obtained by combining\nthe corresponding entry in `indices` with the index of the entry itself: the\nindex-value for dimension = axis is obtained from the value of the corresponding\nentry in `indices` and the index-value for dimension != axis is obtained from the\nindex of the entry itself.\n\nFor instance, in a 2-D tensor case, the update corresponding to the [i][j] entry\nis performed as below:\n```\n output[indices[i][j]][j] = updates[i][j] if axis = 0, \n output[i][indices[i][j]] = updates[i][j] if axis = 1,\n```\n\nThis operator is the inverse of GatherElements. It is similar to Torch's Scatter operation.\n\nExample 1:\n```\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\n```\nExample 2:\n```\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axis = 1\nnode = onnx.helper.make_node(\n 'Scatter',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter(data, indices, updates, axis=axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(node, inputs=[data, indices, updates], outputs=[y],\n name='test_scatter_with_axis', opset_imports=[helper.make_opsetid(\"\", 10)])", + "summary": "scatter_with_axis" + }, + { + "code": "node = onnx.helper.make_node(\n 'Scatter',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], 
dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = scatter(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(node, inputs=[data, indices, updates], outputs=[y],\n name='test_scatter_without_axis', opset_imports=[helper.make_opsetid(\"\", 10)])", + "summary": "scatter_without_axis" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.", + "name": "indices", + "type": "Tind" + }, + { + "description": "Tensor of rank r >=1 (same rank and shape as indices)", + "name": "updates", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank r >= 1 (same rank as input).", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "ScatterElements", + "schema": { + "attributes": [ + { + "description": "Which axis to scatter on. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(data).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "ScatterElements takes three inputs `data`, `updates`, and `indices` of the same\nrank r >= 1 and an optional attribute axis that identifies an axis of `data`\n(by default, the outer-most axis, that is axis 0). The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value\nto values specified by `updates` at specific index positions specified by\n`indices`. Its output shape is the same as the shape of `data`.\n\nFor each entry in `updates`, the target index in `data` is obtained by combining\nthe corresponding entry in `indices` with the index of the entry itself: the\nindex-value for dimension = axis is obtained from the value of the corresponding\nentry in `indices` and the index-value for dimension != axis is obtained from the\nindex of the entry itself.\n\nFor instance, in a 2-D tensor case, the update corresponding to the [i][j] entry\nis performed as below:\n```\n output[indices[i][j]][j] = updates[i][j] if axis = 0, \n output[i][indices[i][j]] = updates[i][j] if axis = 1,\n```\n\nThis operator is the inverse of GatherElements. 
It is similar to Torch's Scatter operation.\n\nExample 1:\n```\n data = [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n ]\n indices = [\n [1, 0, 2],\n [0, 2, 1],\n ]\n updates = [\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n ]\n output = [\n [2.0, 1.1, 0.0]\n [1.0, 0.0, 2.2]\n [0.0, 2.1, 1.2]\n ]\n```\nExample 2:\n```\n data = [[1.0, 2.0, 3.0, 4.0, 5.0]]\n indices = [[1, 3]]\n updates = [[1.1, 2.1]]\n axis = 1\n output = [[1.0, 1.1, 3.0, 2.1, 5.0]]\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axis = 1\nnode = onnx.helper.make_node(\n 'ScatterElements',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, 3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 3.0, 2.1, 5.0]]\n\nexpect(node, inputs=[data, indices, updates], outputs=[y],\n name='test_scatter_elements_with_axis')", + "summary": "scatter_elements_with_axis" + }, + { + "code": "axis = 1\nnode = onnx.helper.make_node(\n 'ScatterElements',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n axis=axis,\n)\ndata = np.array([[1.0, 2.0, 3.0, 4.0, 5.0]], dtype=np.float32)\nindices = np.array([[1, -3]], dtype=np.int64)\nupdates = np.array([[1.1, 2.1]], dtype=np.float32)\n\ny = scatter_elements(data, indices, updates, axis)\n# print(y) produces\n# [[1.0, 1.1, 2.1, 4.0, 5.0]]\n\nexpect(node, inputs=[data, indices, updates], outputs=[y],\n name='test_scatter_elements_with_negative_indices')", + "summary": "scatter_elements_with_negative_indices" + }, + { + "code": "node = onnx.helper.make_node(\n 'ScatterElements',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n)\ndata = np.zeros((3, 3), dtype=np.float32)\nindices = np.array([[1, 0, 2], [0, 2, 1]], dtype=np.int64)\nupdates = np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]], dtype=np.float32)\n\ny = 
scatter_elements(data, indices, updates)\n# print(y) produces\n# [[2.0, 1.1, 0.0],\n# [1.0, 0.0, 2.2],\n# [0.0, 2.1, 1.2]]\n\nexpect(node, inputs=[data, indices, updates], outputs=[y],\n name='test_scatter_elements_without_axis')", + "summary": "scatter_elements_without_axis" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of int32/int64 indices, of r >= 1 (same rank as input). All index values are expected to be within bounds [-s, s-1] along axis of size s. It is an error if any of the index values are out of bounds.", + "name": "indices", + "type": "Tind" + }, + { + "description": "Tensor of rank r >=1 (same rank and shape as indices)", + "name": "updates", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank r >= 1 (same rank as input).", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Input and output types can be of any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "ScatterND", + "schema": { + "description": "ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1,\nand `updates` tensor of rank q + r - indices.shape[-1] - 1. 
The output of the operation\nis produced by creating a copy of the input `data`, and then updating its value to values\nspecified by `updates` at specific index positions specified by `indices`. Its output shape\nis the same as the shape of `data`. Note that `indices` should not have duplicate entries.\nThat is, two or more `updates` for the same index-location is not supported.\n\n`indices` is an integer tensor. Let k denote indices.shape[-1], the last dimension in the shape of `indices`.\n `indices` is treated as a (q-1)-dimensional tensor of k-tuples, where each k-tuple is a partial-index into `data`.\nHence, k can be a value at most the rank of `data`. When k equals rank(data), each update entry specifies an\nupdate to a single element of the tensor. When k is less than rank(data) each update entry specifies an\nupdate to a slice of the tensor.\n\n`updates` is treated as a (q-1)-dimensional tensor of replacement-slice-values. Thus, the\nfirst (q-1) dimensions of updates.shape must match the first (q-1) dimensions of indices.shape.\nThe remaining dimensions of `updates` correspond to the dimensions of the\nreplacement-slice-values. Each replacement-slice-value is a (r-k) dimensional tensor,\ncorresponding to the trailing (r-k) dimensions of `data`. 
Thus, the shape of `updates`\nmust equal indices.shape[0:q-1] ++ data.shape[k:r-1], where ++ denotes the concatenation\nof shapes.\n\nThe `output` is calculated via the following equation:\n\n output = np.copy(data)\n update_indices = indices.shape[:-1]\n for idx in np.ndindex(update_indices):\n output[indices[idx]] = updates[idx]\n\nThe order of iteration in the above loop is not specified.\nIn particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].\nThis ensures that the output value does not depend on the iteration order.\n\nThis operator is the inverse of GatherND.\n\nExample 1:\n```\n data = [1, 2, 3, 4, 5, 6, 7, 8]\n indices = [[4], [3], [1], [7]]\n updates = [9, 10, 11, 12]\n output = [1, 11, 3, 10, 9, 6, 7, 12]\n```\n\nExample 2:\n```\n data = [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n indices = [[0], [2]]\n updates = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]\n output = [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]\n```\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'ScatterND',\n inputs=['data', 'indices', 'updates'],\n outputs=['y'],\n)\ndata = np.array(\n [[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],\n [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\nindices = np.array([[0], [2]], dtype=np.int64)\nupdates = np.array(\n [[[5, 5, 5, 
5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]], dtype=np.float32)\n# Expecting output as np.array(\n# [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],\n# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],\n# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\noutput = scatter_nd_impl(data, indices, updates)\nexpect(node, inputs=[data, indices, updates], outputs=[output],\n name='test_scatternd')", + "summary": "scatternd" + } + ], + "inputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "data", + "type": "T" + }, + { + "description": "Tensor of rank q >= 1.", + "name": "indices", + "type": "tensor(int64)" + }, + { + "description": "Tensor of rank q + r - indices_shape[-1] - 1.", + "name": "updates", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of rank r >= 1.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Selu", + "schema": { + "attributes": [ + { + "default": 1.673200011253357, + "description": "Coefficient of SELU default to 1.6732.", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + }, + { + "default": 1.0506999492645264, + 
"description": "Coefficient of SELU default to 1.0507.", + "name": "gamma", + "required": false, + "type": "float32" + } + ], + "category": "Activation", + "description": "Selu takes one input data (Tensor) and produces one output data\n(Tensor) where the scaled exponential linear unit function,\n`y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`,\nis applied to the tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Selu',\n inputs=['x'],\n outputs=['y'],\n alpha=2.0,\n gamma=3.0\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-3.79272318, 0., 3.]\ny = np.clip(x, 0, np.inf) * 3.0 + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_selu_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) * 3.0 + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_selu')", + "summary": "selu" + }, + { + "code": "default_alpha = 1.67326319217681884765625\ndefault_gamma = 1.05070102214813232421875\nnode = onnx.helper.make_node(\n 'Selu',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) * default_gamma + \\\n (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha * default_gamma\nexpect(node, inputs=[x], outputs=[y],\n name='test_selu_default')", + "summary": "selu_default" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" 
+ } + ] + } + }, + { + "name": "Selu", + "schema": { + "attributes": [ + { + "default": 1.6732631921768188, + "description": "Coefficient of SELU default to 1.67326319217681884765625 (i.e., float32 approximation of 1.6732632423543772848170429916717).", + "name": "alpha", + "required": false, + "type": "float32" + }, + { + "default": 1.0507010221481323, + "description": "Coefficient of SELU default to 1.05070102214813232421875 (i.e., float32 approximation of 1.0507009873554804934193349852946).", + "name": "gamma", + "required": false, + "type": "float32" + } + ], + "category": "Activation", + "description": "Selu takes one input data (Tensor) and produces one output data\n(Tensor) where the scaled exponential linear unit function,\n`y = gamma * (alpha * e^x - alpha) for x <= 0`, `y = gamma * x for x > 0`,\nis applied to the tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Selu',\n inputs=['x'],\n outputs=['y'],\n alpha=2.0,\n gamma=3.0\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\n# expected output [-3.79272318, 0., 3.]\ny = np.clip(x, 0, np.inf) * 3.0 + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_selu_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) * 3.0 + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0\nexpect(node, inputs=[x], outputs=[y],\n name='test_selu')", + "summary": "selu" + }, + { + "code": "default_alpha = 1.67326319217681884765625\ndefault_gamma = 1.05070102214813232421875\nnode = onnx.helper.make_node(\n 'Selu',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, 0, np.inf) * default_gamma + \\\n (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha * default_gamma\nexpect(node, inputs=[x], outputs=[y],\n name='test_selu_default')", + "summary": "selu_default" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + 
"type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "SequenceAt", + "schema": { + "description": "Outputs a tensor copy from the tensor at 'position' in 'input_sequence'.\nAccepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'.\nNegative value means counting positions from the back.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input sequence.", + "name": "input_sequence", + "type": "S" + }, + { + "description": "Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. It is an error if any of the index values are out of bounds. 
It must be a scalar(tensor of empty shape).", + "name": "position", + "type": "I" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor at the specified position in the input sequence.", + "name": "tensor", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "S" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain position to integral tensor. 
It must be a scalar(tensor of empty shape).", + "type_param_str": "I" + } + ] + } + }, + { + "name": "SequenceConstruct", + "schema": { + "description": "Construct a tensor sequence containing 'inputs' tensors.\nAll tensors in 'inputs' must have the same data type.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Tensors.", + "name": "inputs", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Sequence enclosing the input tensors.", + "name": "output_sequence", + "type": "S" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input types to any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain output types to any tensor type.", + "type_param_str": "S" + } + ] + } + }, + { + "name": "SequenceEmpty", + "schema": { + "attributes": [ + { + "description": "(Optional) The data type of the tensors in the output sequence. 
The default type is 'float'.", + "name": "dtype", + "required": false, + "type": "int64" + } + ], + "description": "Construct an empty tensor sequence, with given data type.\n", + "domain": "ai.onnx", + "max_input": 0, + "max_output": 1, + "min_input": 0, + "min_output": 1, + "outputs": [ + { + "description": "Empty sequence.", + "name": "output", + "type": "S" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain output types to any tensor type.", + "type_param_str": "S" + } + ] + } + }, + { + "name": "SequenceErase", + "schema": { + "description": "Outputs a tensor sequence that removes the tensor at 'position' from 'input_sequence'.\nAccepted range for 'position' is in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'.\nNegative value means counting positions from the back.\n'position' is optional, by default it erases the last tensor from 'input_sequence'.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input sequence.", + "name": "input_sequence", + "type": "S" + }, + { + "description": "Position of the tensor in the sequence. Negative value means counting positions from the back. Accepted range in `[-n, n - 1]`, where `n` is the number of tensors in 'input_sequence'. It is an error if any of the index values are out of bounds. 
It must be a scalar(tensor of empty shape).", + "name": "position", + "option": "optional", + "type": "I" + } + ], + "inputs_range": "1 - 2", + "max_input": 2, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output sequence that has the tensor at the specified position removed.", + "name": "output_sequence", + "type": "S" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "S" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain position to integral tensor. It must be a scalar(tensor of empty shape).", + "type_param_str": "I" + } + ] + } + }, + { + "name": "SequenceInsert", + "schema": { + "description": "Outputs a tensor sequence that inserts 'tensor' into 'input_sequence' at 'position'.\n'tensor' must have the same data type as 'input_sequence'.\nAccepted range for 'position' is in `[-n, n]`, where `n` is the number of tensors in 'input_sequence'.\nNegative value means counting positions from the back.\n'position' is optional, by default it inserts 'tensor' to the back of 'input_sequence'.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input sequence.", + "name": "input_sequence", + "type": "S" + }, + { + "description": "Input tensor to be inserted into the input sequence.", + "name": "tensor", + "type": "T" + }, + { + "description": "Position in the sequence where the new tensor is inserted. 
It is optional and default is to insert to the back of the sequence. Negative value means counting positions from the back. Accepted range in `[-n, n]`, where `n` is the number of tensors in 'input_sequence'. It is an error if any of the index values are out of bounds. It must be a scalar(tensor of empty shape).", + "name": "position", + "option": "optional", + "type": "I" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output sequence that contains the inserted tensor at given position.", + "name": "output_sequence", + "type": "S" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "S" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain position to integral tensor. 
It must be a scalar(tensor of empty shape).", + "type_param_str": "I" + } + ] + } + }, + { + "name": "SequenceLength", + "schema": { + "description": "Produces a scalar(tensor of empty shape) containing the number of tensors in 'input_sequence'.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input sequence.", + "name": "input_sequence", + "type": "S" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Length of input sequence. It must be a scalar(tensor of empty shape).", + "name": "length", + "type": "I" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain to any tensor type.", + "type_param_str": "S" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain output to integral tensor. 
It must be a scalar(tensor of empty shape).", + "type_param_str": "I" + } + ] + } + }, + { + "name": "Shape", + "schema": { + "description": "Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Shape',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([\n [1, 2, 3],\n [4, 5, 6],\n]).astype(np.float32)\ny = np.array([\n 2, 3,\n]).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_shape_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.array(x.shape).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_shape')", + "summary": "shape" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Shape of the input tensor", + "name": "shape", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain output to int64 tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Shrink", + "schema": { + "attributes": [ + { + "description": "The bias value added to output. Default is 0.", + "name": "bias", + "required": false, + "type": "float32" + }, + { + "default": 0.5, + "description": "The lambd value for the Shrink formulation. 
Default is 0.5.", + "name": "lambd", + "required": false, + "type": "float32" + } + ], + "description": "Shrink takes one input data (Tensor) and produces one Tensor output,\nhaving same datatype and shape with input. It has two attributes, lambd and\nbias. The formula of this operator is: If x < -lambd, y = x + bias;\nIf x > lambd, y = x - bias; Otherwise, y = 0.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Shrink',\n inputs=['x'],\n outputs=['y'],\n lambd=1.5,\n)\nX = np.arange(-2.0, 2.1, dtype=np.float32)\nY = np.array([-2, 0, 0, 0, 2], dtype=np.float32)\nexpect(node, inputs=[X], outputs=[Y],\n name='test_shrink_hard')", + "summary": "hard_shrink" + }, + { + "code": "node = onnx.helper.make_node(\n 'Shrink',\n inputs=['x'],\n outputs=['y'],\n lambd=1.5,\n bias=1.5,\n)\nX = np.arange(-2.0, 2.1, dtype=np.float32)\nY = np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32)\nexpect(node, inputs=[X], outputs=[Y],\n name='test_shrink_soft')", + "summary": "soft_shrink" + } + ], + "inputs": [ + { + "description": "The input data as Tensor.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output.", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrains input to only numeric types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sigmoid", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "category": "Activation", + "description": "Sigmoid takes one input data 
(Tensor) and produces one output data\n(Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the\ntensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sigmoid',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = 1.0 / (1.0 + np.exp(np.negative(x))) # expected output [0.26894143, 0.5, 0.7310586]\nexpect(node, inputs=[x], outputs=[y],\n name='test_sigmoid_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = 1.0 / (1.0 + np.exp(np.negative(x)))\nexpect(node, inputs=[x], outputs=[y],\n name='test_sigmoid')", + "summary": "sigmoid" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sigmoid", + "schema": { + "category": "Activation", + "description": "Sigmoid takes one input data (Tensor) and produces one output data\n(Tensor) where the sigmoid function, y = 1 / (1 + exp(-x)), is applied to the\ntensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sigmoid',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = 1.0 / (1.0 + np.exp(np.negative(x))) # expected output [0.26894143, 0.5, 0.7310586]\nexpect(node, inputs=[x], outputs=[y],\n name='test_sigmoid_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = 1.0 / (1.0 + np.exp(np.negative(x)))\nexpect(node, inputs=[x], outputs=[y],\n name='test_sigmoid')", + "summary": "sigmoid" + } + ], + "inputs": 
[ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sign", + "schema": { + "description": "Calculate the sign of the given input tensor element-wise.\nIf input > 0, output 1. if input < 0, output -1. if input == 0, output 0.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sign',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array(range(-5, 6)).astype(np.float32)\ny = np.sign(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_sign')", + "summary": "sign" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The sign of the input tensor computed element-wise. 
It has the same shape and type of the input.", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to all numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sin", + "schema": { + "description": "Calculates the sine of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sin',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.sin(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_sin_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.sin(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_sin')", + "summary": "sin" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The sine of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sinh", + "schema": { + "description": "Calculates the hyperbolic sine of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sinh',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.sinh(x) # expected output [-1.17520118, 0., 
1.17520118]\nexpect(node, inputs=[x], outputs=[y],\n name='test_sinh_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.sinh(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_sinh')", + "summary": "sinh" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The hyperbolic sine values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Size", + "schema": { + "description": "Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Size',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([\n [1, 2, 3],\n [4, 5, 6],\n]).astype(np.float32)\ny = np.array(6).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_size_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.array(x.size).astype(np.int64)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_size')", + "summary": "size" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Total number of elements of the input tensor", + "name": "size", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + 
"tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Input tensor can be of arbitrary type.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain output to int64 tensor, which should be a scalar though.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Slice", + "schema": { + "attributes": [ + { + "description": "Axes that `starts` and `ends` apply to. It's optional. If not present, will be treated as [0, 1, ..., len(`starts`) - 1].", + "name": "axes", + "required": false, + "type": "int64[]" + }, + { + "description": "Ending indices (exclusive) of corresponding axis in axes`", + "name": "ends", + "required": true, + "type": "int64[]" + }, + { + "description": "Starting indices of corresponding axis in `axes`", + "name": "starts", + "required": true, + "type": "int64[]" + } + ], + "category": "Tensor", + "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\nSlices uses `axes`, `starts` and `ends` attributes to specify the start and end\ndimension for each axis in the list of axes, it uses this information to\nslice the input `data` tensor. If a negative value is passed for any of the\nstart or end indices, it represent number of elements before the end of that\ndimension. If the value passed to start or end is larger than the `n` (the\nnumber of elements in this dimension), it represents `n`. 
For slicing to the\nend of a dimension with unknown size, it is recommended to pass in `INT_MAX`.\nIf `axes` are omitted, they are set to `[0, ..., ndim-1]`.\nExample 1:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n result = [\n [5, 6, 7],\n ]\nExample 2:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 1000]\n result = [\n [2, 3, 4],\n ]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[0:3, 0:10]\nstarts = np.array([0, 0], dtype=np.int64)\nends = np.array([3, 10], dtype=np.int64)\naxes = np.array([0, 1], dtype=np.int64)\nsteps = np.array([1, 1], dtype=np.int64)\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice')", + "summary": "slice" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends], outputs=[y],\n name='test_slice_default_axes')", + "summary": "slice_default_axes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends, axes], outputs=[y],\n name='test_slice_default_steps')", + "summary": "slice_default_steps" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 
5).astype(np.float32)\nstarts = np.array([1], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1:1000]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_end_out_of_bounds')", + "summary": "slice_end_out_of_bounds" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0], dtype=np.int64)\nends = np.array([-1], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 0:-1]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_neg')", + "summary": "slice_neg" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([20, 10, 4], dtype=np.int64)\nends = np.array([0, 0, 1], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\nsteps = np.array([-1, -3, -2])\ny = x[20:0:-1, 10:0:-3, 4:1:-2]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_neg_steps')", + "summary": "slice_neg_steps" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, -2, -1], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends, axes], outputs=[y],\n name='test_slice_negative_axes')", + "summary": "slice_negative_axes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts 
= np.array([1000], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1000:1000]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_start_out_of_bounds')", + "summary": "slice_start_out_of_bounds" + } + ], + "inputs": [ + { + "description": "Tensor of data to extract slices from.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Sliced data tensor.", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Slice", + "schema": { + "category": "Tensor", + "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\nSlices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end\ndimension and step for each axis in the list of axes, it uses this information to\nslice the input `data` tensor. If a negative value is passed for any of the\nstart or end indices, it represent number of elements before the end of that\ndimension. If the value passed to start or end is larger than the `n` (the\nnumber of elements in this dimension), it represents `n`. 
For slicing to the\nend of a dimension with unknown size, it is recommended to pass in `INT_MAX`.\nIf a negative value is passed for step, it represents slicing backward.\nIf `axes` are omitted, they are set to `[0, ..., ndim-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`\nExample 1:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n steps = [1, 2]\n result = [\n [5, 7],\n ]\nExample 2:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 1000]\n result = [\n [2, 3, 4],\n ]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[0:3, 0:10]\nstarts = np.array([0, 0], dtype=np.int64)\nends = np.array([3, 10], dtype=np.int64)\naxes = np.array([0, 1], dtype=np.int64)\nsteps = np.array([1, 1], dtype=np.int64)\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice')", + "summary": "slice" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends], outputs=[y],\n name='test_slice_default_axes')", + "summary": "slice_default_axes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends, axes], outputs=[y],\n name='test_slice_default_steps')", + "summary": "slice_default_steps" + }, + { + "code": 
"node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1:1000]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_end_out_of_bounds')", + "summary": "slice_end_out_of_bounds" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0], dtype=np.int64)\nends = np.array([-1], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 0:-1]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_neg')", + "summary": "slice_neg" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([20, 10, 4], dtype=np.int64)\nends = np.array([0, 0, 1], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\nsteps = np.array([-1, -3, -2])\ny = x[20:0:-1, 10:0:-3, 4:1:-2]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_neg_steps')", + "summary": "slice_neg_steps" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, -2, -1], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends, axes], outputs=[y],\n name='test_slice_negative_axes')", + "summary": "slice_negative_axes" + }, + { + "code": "node = 
onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([1000], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1000:1000]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_start_out_of_bounds')", + "summary": "slice_start_out_of_bounds" + } + ], + "inputs": [ + { + "description": "Tensor of data to extract slices from.", + "name": "data", + "type": "T" + }, + { + "description": "1-D tensor of starting indices of corresponding axis in `axes`", + "name": "starts", + "type": "Tind" + }, + { + "description": "1-D tensor of ending indices (exclusive) of corresponding axis in `axes`", + "name": "ends", + "type": "Tind" + }, + { + "description": "1-D tensor of axes that `starts` and `ends` apply to.", + "name": "axes", + "option": "optional", + "type": "Tind" + }, + { + "description": "1-D tensor of slice step of corresponding axis in `axes`. Default to 1. 
", + "name": "steps", + "option": "optional", + "type": "Tind" + } + ], + "inputs_range": "3 - 5", + "max_input": 5, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Sliced data tensor.", + "name": "output", + "type": "T" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "Slice", + "schema": { + "category": "Tensor", + "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://docs.scipy.org/doc/numpy/reference/arrays.indexing.html\nSlices uses `starts`, `ends`, `axes` and `steps` inputs to specify the start and end\ndimension and step for each axis in the list of axes, it uses this information to\nslice the input `data` tensor. If a negative value is passed for any of the\nstart or end indices, it represents number of elements before the end of that\ndimension. If the value passed to start or end is larger than the `n` (the\nnumber of elements in this dimension), it represents `n`. For slicing to the\nend of a dimension with unknown size, it is recommended to pass in `INT_MAX` \nwhen sclicing forward and 'INT_MIN' when slicing backward.\nIf a negative value is passed for step, it represents slicing backward. 
\nHowever step value cannot be 0.\nIf `axes` are omitted, they are set to `[0, ..., ndim-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`\nExample 1:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n axes = [0, 1]\n starts = [1, 0]\n ends = [2, 3]\n steps = [1, 2]\n result = [\n [5, 7],\n ]\nExample 2:\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 1000]\n result = [\n [2, 3, 4],\n ]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\ny = x[0:3, 0:10]\nstarts = np.array([0, 0], dtype=np.int64)\nends = np.array([3, 10], dtype=np.int64)\naxes = np.array([0, 1], dtype=np.int64)\nsteps = np.array([1, 1], dtype=np.int64)\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice')", + "summary": "slice" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends], outputs=[y],\n name='test_slice_default_axes')", + "summary": "slice_default_axes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends, axes], outputs=[y],\n name='test_slice_default_steps')", + "summary": "slice_default_steps" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 
10, 5).astype(np.float32)\nstarts = np.array([1], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1:1000]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_end_out_of_bounds')", + "summary": "slice_end_out_of_bounds" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0], dtype=np.int64)\nends = np.array([-1], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 0:-1]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_neg')", + "summary": "slice_neg" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([20, 10, 4], dtype=np.int64)\nends = np.array([0, 0, 1], dtype=np.int64)\naxes = np.array([0, 1, 2], dtype=np.int64)\nsteps = np.array([-1, -3, -2])\ny = x[20:0:-1, 10:0:-3, 4:1:-2]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_neg_steps')", + "summary": "slice_neg_steps" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 5).astype(np.float32)\nstarts = np.array([0, 0, 3], dtype=np.int64)\nends = np.array([20, 10, 4], dtype=np.int64)\naxes = np.array([0, -2, -1], dtype=np.int64)\ny = x[:, :, 3:4]\n\nexpect(node, inputs=[x, starts, ends, axes], outputs=[y],\n name='test_slice_negative_axes')", + "summary": "slice_negative_axes" + }, + { + "code": "node = onnx.helper.make_node(\n 'Slice',\n inputs=['x', 'starts', 'ends', 'axes', 'steps'],\n outputs=['y'],\n)\n\nx = np.random.randn(20, 10, 
5).astype(np.float32)\nstarts = np.array([1000], dtype=np.int64)\nends = np.array([1000], dtype=np.int64)\naxes = np.array([1], dtype=np.int64)\nsteps = np.array([1], dtype=np.int64)\ny = x[:, 1000:1000]\n\nexpect(node, inputs=[x, starts, ends, axes, steps], outputs=[y],\n name='test_slice_start_out_of_bounds')", + "summary": "slice_start_out_of_bounds" + } + ], + "inputs": [ + { + "description": "Tensor of data to extract slices from.", + "name": "data", + "type": "T" + }, + { + "description": "1-D tensor of starting indices of corresponding axis in `axes`", + "name": "starts", + "type": "Tind" + }, + { + "description": "1-D tensor of ending indices (exclusive) of corresponding axis in `axes`", + "name": "ends", + "type": "Tind" + }, + { + "description": "1-D tensor of axes that `starts` and `ends` apply to. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "option": "optional", + "type": "Tind" + }, + { + "description": "1-D tensor of slice step of corresponding axis in `axes`. Negative value means slicing backward. 'steps' cannot be 0. 
Defaults to 1.", + "name": "steps", + "option": "optional", + "type": "Tind" + } + ], + "inputs_range": "3 - 5", + "max_input": 5, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Sliced data tensor.", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain indices to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "Softmax", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Activation", + "description": "The operator computes the softmax (normalized exponential) values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). The output tensor has the same shape\nand contains the softmax values of the corresponding input.\n\nInput does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. 
For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output [[0.09003058, 0.24472848, 0.66524094]]\ny = np.exp(x) / np.sum(np.exp(x), axis=1)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_example')", + "summary": "softmax" + }, + { + "code": "def softmax_2d(x): # type: (np.ndarray) -> np.ndarray\n max_x = np.max(x, axis=1).reshape((-1, 1))\n exp_x = np.exp(x - max_x)\n return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))\n\nx = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output [[0.0320586, 0.08714432, 0.23688284, 0.64391428],\n# [0.0320586, 0.08714432, 0.23688284, 0.64391428]]\ny = softmax_2d(x)\n\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_large_number')\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=0,\n)\ny = softmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_axis_0')\n\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=1,\n)\ny = softmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_axis_1')\n\n# default axis is 1\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_default_axis')\n\nnode = onnx.helper.make_node(\n 
'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=2,\n)\ny = softmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_axis_2')\n\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=-1,\n)\ny = softmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_negative_axis')", + "summary": "softmax_axis" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output values with the same shape as input tensor (the original size without coercion).", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Softmax", + "schema": { + "attributes": [ + { + "default": 1, + "description": "Describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "category": "Activation", + "description": "The operator computes the softmax (normalized exponential) values for each layer in the batch\n of the given input.\n\nThe input does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\ninput \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then input will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... 
* a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the input tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors. The output tensor has the same shape\nand contains the softmax values of the corresponding input.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n)\nx = np.array([[-1, 0, 1]]).astype(np.float32)\n# expected output [[0.09003058, 0.24472848, 0.66524094]]\ny = np.exp(x) / np.sum(np.exp(x), axis=1)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_example')", + "summary": "softmax" + }, + { + "code": "def softmax_2d(x): # type: (np.ndarray) -> np.ndarray\n max_x = np.max(x, axis=1).reshape((-1, 1))\n exp_x = np.exp(x - max_x)\n return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))\n\nx = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)\n# expected output [[0.0320586, 0.08714432, 0.23688284, 0.64391428],\n# [0.0320586, 0.08714432, 0.23688284, 0.64391428]]\ny = softmax_2d(x)\n\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_large_number')\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=0,\n)\ny = softmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_axis_0')\n\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=1,\n)\ny = softmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_axis_1')\n\n# default axis is 1\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n 
outputs=['y'],\n)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_default_axis')\n\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=2,\n)\ny = softmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_axis_2')\n\nnode = onnx.helper.make_node(\n 'Softmax',\n inputs=['x'],\n outputs=['y'],\n axis=-1,\n)\ny = softmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softmax_negative_axis')", + "summary": "softmax_axis" + } + ], + "inputs": [ + { + "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output values with the same shape as input tensor (the original size without coercion).", + "name": "output", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "SoftmaxCrossEntropyLoss", + "schema": { + "attributes": [ + { + "description": "Specifies a target value that is ignored and does not contribute to the input gradient. It's an optional value.", + "name": "ignore_index", + "required": false, + "type": "int64" + }, + { + "default": "mean", + "description": "Type of reduction to apply to loss: none, sum, mean(default). 'none': no reduction will be applied, 'sum': the output will be summed. 
'mean': the sum of the output will be divided by the number of elements in the output.", + "name": "reduction", + "required": false, + "type": "string" + } + ], + "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally do a reduction operator.\n\nshape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\nshape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can caculated as follows:\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\nor\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n\nloss is zero for the case when label-value equals ignore_index.\n l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n\nwhere:\n p = Softmax(scores)\n y = Log(p)\n c = labels[i][d1][d2]...[dk]\n\nFinally, L is optionally reduced:\nIf reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\nIf reduction = 'sum', the output is scalar: Sum(L).\nIf reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W),\nwhere tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]].\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "reduction = 'mean'\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n 
inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1))\nlabels[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index)\n\nexpect(node, inputs=[x, labels, weight], outputs=[sce], name='test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index')", + "summary": "input_shape_is_NCd1_mean_weight_negative_ignore_index" + }, + { + "code": "reduction = 'mean'\nignore_index = np.int64(-1)\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C, dim1 = 3, 5, 6\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1))\nlabels[0][0] = -1\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index,\n get_log_prob=True)\n\nexpect(node, inputs=[x, labels, weight], outputs=[loss, log_prob], name='test_softmax_cross_entropy_input_shape_is_NCd1_mean_weight_negative_ignore_index_log_prob')", + "summary": "input_shape_is_NCd1_mean_weight_negative_ignore_index_log_prob" + }, + { + "code": "reduction = 'none'\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3))\nlabels[0][0][0][0] = -5\n\nsce = softmaxcrossentropy(x,\n labels,\n reduction=reduction,\n 
ignore_index=ignore_index)\n\nexpect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index')", + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index" + }, + { + "code": "reduction = 'none'\nignore_index = np.int64(-5)\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C, dim1, dim2, dim3 = 3, 5, 6, 6, 5\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3))\nlabels[0][0][0][0] = -5\n\nloss, log_prob = softmaxcrossentropy(x,\n labels,\n reduction=reduction,\n ignore_index=ignore_index,\n get_log_prob=True)\n\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob')", + "summary": "input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob" + }, + { + "code": "reduction = 'sum'\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\nN, C = 3, 5\nnp.random.seed(0)\nx = np.random.rand(N, C).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N))\nlabels[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index)\n\nexpect(node, inputs=[x, labels, weight], outputs=[sce], name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index')", + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ignore_index" + }, + { + "code": "reduction = 'sum'\nignore_index = np.int64(10)\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z', 'log_prob'],\n 
reduction=reduction,\n ignore_index=ignore_index)\n\nN, C = 3, 5\nnp.random.seed(0)\nx = np.random.rand(N, C).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N))\nlabels[0] = 10\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(x,\n labels,\n weight=weight,\n reduction=reduction,\n ignore_index=ignore_index,\n get_log_prob=True)\n\nexpect(node, inputs=[x, labels, weight], outputs=[loss, log_prob], name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_log_prob')", + "summary": "input_shape_is_NCd1d2d3_sum_weight_high_ignore_index_log_prob" + }, + { + "code": "reduction = 'mean'\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5))\nweight = np.random.rand(C).astype(np.float32)\n\nsce = softmaxcrossentropy(x,\n labels,\n weight=weight,\n reduction=reduction)\n\nexpect(node, inputs=[x, labels, weight], outputs=[sce], name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight')", + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight" + }, + { + "code": "reduction = 'mean'\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5))\nweight = np.random.rand(C).astype(np.float32)\n\nloss, log_prob = softmaxcrossentropy(x,\n labels,\n weight=weight,\n reduction=reduction,\n get_log_prob=True)\n\nexpect(node, inputs=[x, labels, weight], outputs=[loss, log_prob], 
name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob')", + "summary": "input_shape_is_NCd1d2d3d4d5_mean_weight_log_prob" + }, + { + "code": "reduction = 'none'\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5))\n\nsce = softmaxcrossentropy(x,\n labels,\n reduction=reduction)\n\nexpect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight')", + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight" + }, + { + "code": "reduction = 'none'\n\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\nN, C, dim1, dim2, dim3, dim4, dim5 = 3, 5, 6, 6, 5, 3, 4\nnp.random.seed(0)\nx = np.random.rand(N, C, dim1, dim2, dim3, dim4, dim5).astype(np.float32)\nlabels = np.random.randint(0, high=C, size=(N, dim1, dim2, dim3, dim4, dim5))\n\nloss, log_prob = softmaxcrossentropy(x,\n labels,\n reduction=reduction,\n get_log_prob=True)\n\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob')", + "summary": "input_shape_is_NCd1d2d3d4d5_none_no_weight_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels)\n\n# Check results\nexpect(node, inputs=[x, labels], 
outputs=[sce], name='test_softmax_cross_entropy_mean')", + "summary": "softmaxcrossentropy_mean" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\ny = np.random.randint(0, high=5, size=(3, 2))\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, y)\n\n# Check results\nexpect(node, inputs=[x, y], outputs=[sce], name='test_softmax_cross_entropy_mean_3d')", + "summary": "softmaxcrossentropy_mean_3d" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\ny = np.random.randint(0, high=5, size=(3, 2))\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, y, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, y], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_3d_log_prob')", + "summary": "softmaxcrossentropy_mean_3d_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_log_prob')", + "summary": "softmaxcrossentropy_mean_log_prob" + }, + { + "code": "# Define 
operator attributes.\nreduction = 'mean'\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nlabels[0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_mean_no_weight_ignore_index')", + "summary": "softmaxcrossentropy_mean_no_weights_ignore_index" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2))\nlabels[0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, ignore_index=ignore_index)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_mean_no_weight_ignore_index_3d')", + "summary": "softmaxcrossentropy_mean_no_weights_ignore_index_3d" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2))\nlabels[0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, 
ignore_index=ignore_index, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_no_weight_ignore_index_3d_log_prob')", + "summary": "softmaxcrossentropy_mean_no_weights_ignore_index_3d_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7))\nlabels[0][0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction=reduction, ignore_index=ignore_index)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_mean_no_weight_ignore_index_4d')", + "summary": "softmaxcrossentropy_mean_no_weights_ignore_index_4d" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7))\nlabels[0][0][0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, reduction=reduction, ignore_index=ignore_index, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_no_weight_ignore_index_4d_log_prob')", + "summary": "softmaxcrossentropy_mean_no_weights_ignore_index_4d_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = 
np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nlabels[0] = np.int64(2)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, ignore_index=ignore_index, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_no_weight_ignore_index_log_prob')", + "summary": "softmaxcrossentropy_mean_no_weights_ignore_index_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[sce], name='test_softmax_cross_entropy_mean_weight')", + "summary": "softmaxcrossentropy_mean_weights" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(0)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nlabels[0] = np.int64(0)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, 
weight=weights, ignore_index=ignore_index)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[sce], name='test_softmax_cross_entropy_mean_weight_ignore_index')", + "summary": "softmaxcrossentropy_mean_weights_ignore_index" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(1)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2))\nlabels[0][0] = np.int64(1)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[sce], name='test_softmax_cross_entropy_mean_weight_ignore_index_3d')", + "summary": "softmaxcrossentropy_mean_weights_ignore_index_3d" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(1)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2))\nlabels[0][0] = np.int64(1)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_weight_ignore_index_3d_log_prob')", + "summary": "softmaxcrossentropy_mean_weights_ignore_index_3d_log_prob" + }, + { + 
"code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7))\nlabels[0][0][0] = np.int64(2)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction=reduction, weight=weights, ignore_index=ignore_index)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[sce], name='test_softmax_cross_entropy_mean_weight_ignore_index_4d')", + "summary": "softmaxcrossentropy_mean_weights_ignore_index_4d" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(2)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5, 2, 7).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, 2, 7))\nlabels[0][0][0] = np.int64(2)\nweights = np.array([0.2, 0.3, 0.6, 0.1, 0.5], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, reduction=reduction, weight=weights, ignore_index=ignore_index, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_weight_ignore_index_4d_log_prob')", + "summary": "softmaxcrossentropy_mean_weights_ignore_index_4d_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\nignore_index = np.int64(0)\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n 
outputs=['z', 'log_prob'],\n reduction=reduction,\n ignore_index=ignore_index)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nlabels[0] = np.int64(0)\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, weight=weights, ignore_index=ignore_index, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_weight_ignore_index_log_prob')", + "summary": "softmaxcrossentropy_mean_weights_ignore_index_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'mean'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, weight=weights, get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[loss, log_prob], name='test_softmax_cross_entropy_mean_weight_log_prob')", + "summary": "softmaxcrossentropy_mean_weights_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'none'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction='none')\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], 
name='test_softmax_cross_entropy_none')", + "summary": "softmaxcrossentropy_none" + }, + { + "code": "# Define operator attributes.\nreduction = 'none'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, reduction='none', get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_none_log_prob')", + "summary": "softmaxcrossentropy_none_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'none'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, weight=weights, reduction='none')\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[sce], name='test_softmax_cross_entropy_none_weights')", + "summary": "softmaxcrossentropy_none_weights" + }, + { + "code": "# Define operator attributes.\nreduction = 'none'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y', 'w'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\nweights = np.array([0.9, 0.7, 0.8, 0.9, 0.9], dtype=np.float32)\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, weight=weights, 
reduction='none', get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels, weights], outputs=[loss, log_prob], name='test_softmax_cross_entropy_none_weights_log_prob')", + "summary": "softmaxcrossentropy_none_weights_log_prob" + }, + { + "code": "# Define operator attributes.\nreduction = 'sum'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\n\n# Compute SoftmaxCrossEntropyLoss\nsce = softmaxcrossentropy(x, labels, reduction='sum')\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[sce], name='test_softmax_cross_entropy_sum')", + "summary": "softmaxcrossentropy_sum" + }, + { + "code": "# Define operator attributes.\nreduction = 'sum'\n\n# Create operator.\nnode = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n# Define operator inputs.\nnp.random.seed(0)\nx = np.random.rand(3, 5).astype(np.float32)\nlabels = np.random.randint(0, high=5, size=(3, ))\n\n# Compute SoftmaxCrossEntropyLoss\nloss, log_prob = softmaxcrossentropy(x, labels, reduction='sum', get_log_prob=True)\n\n# Check results\nexpect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_softmax_cross_entropy_sum_log_prob')", + "summary": "softmaxcrossentropy_sum_log_prob" + } + ], + "inputs": [ + { + "description": "The predicted outputs with shape [batch_size, class_size], or [batch_size, class_size, D1, D2 , ..., Dk], where K is the number of dimensions.", + "name": "scores", + "type": "T" + }, + { + "description": "The ground truth output tensor, with shape [batch_size], or [batch_size, D1, D2, ..., Dk], where K is the number of dimensions. Labels element value shall be in range of [0, C). 
If ignore_index is specified, it may have a value outside [0, C) and the label values should either be in the range [0, C) or have the value ignore_index.", + "name": "labels", + "type": "Tind" + }, + { + "description": "A manual rescaling weight given to each class. If given, it has to be a 1D Tensor assigning weight to each of the classes. Otherwise, it is treated as if having all ones.", + "name": "weights", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "2 - 3", + "max_input": 3, + "max_output": 2, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Weighted loss float Tensor. If reduction is 'none', this has the shape of [batch_size], or [batch_size, D1, D2, ..., Dk] in case of K-dimensional loss. Otherwise, it is a scalar.", + "name": "output", + "type": "T" + }, + { + "description": "Log probability tensor. If the output of softmax is prob, its value is log(prob).", + "name": "log_prob", + "option": "optional", + "type": "T" + } + ], + "outputs_range": "1 - 2", + "since_version": 12, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain target to integer types", + "type_param_str": "Tind" + } + ] + } + }, + { + "name": "Softplus", + "schema": { + "category": "Activation", + "description": "Softplus takes one input data (Tensor) and produces one output data\n(Tensor) where the softplus function, y = ln(exp(x) + 1), is applied to\nthe tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Softplus',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.log(np.exp(x) + 1) # expected output [0.31326166, 0.69314718, 1.31326163]\nexpect(node, 
inputs=[x], outputs=[y],\n name='test_softplus_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.log(np.exp(x) + 1)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softplus')", + "summary": "softplus" + } + ], + "inputs": [ + { + "description": "1D input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "1D input tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Softsign", + "schema": { + "category": "Activation", + "description": "Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Softsign',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.array([-0.5, 0, 0.5]).astype(np.float32)\nexpect(node, inputs=[x], outputs=[y],\n name='test_softsign_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = x / (1 + np.abs(x))\nexpect(node, inputs=[x], outputs=[y],\n name='test_softsign')", + "summary": "softsign" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The softsign (x/(1+|x|)) values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" 
+ } + ] + } + }, + { + "name": "SpaceToDepth", + "schema": { + "attributes": [ + { + "description": "Blocks of [blocksize, blocksize] are moved.", + "name": "blocksize", + "required": true, + "type": "int64" + } + ], + "description": "SpaceToDepth rearranges blocks of spatial data into depth. More specifically,\nthis op outputs a copy of the input tensor where values from the height and width dimensions\nare moved to the depth dimension.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "Input tensor of [N,C,H,W], where N is the batch axis, C is the channel or depth, H is the height and W is the width.", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of [N, C * blocksize * blocksize, H/blocksize, W/blocksize].", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Split", + "schema": { + "attributes": [ + { + "description": "Which axis to split on", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "length of each output", + "name": "split", + "required": false, + "type": "int64[]" + } + ], + "category": "Tensor", + "description": "Split a tensor into a list of tensors, along the specified\n'axis'. The lengths of the split can be specified using argument 'axis' or\noptional second input blob to the operator. 
Otherwise, the tensor is split\nto equal sized parts.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3'],\n axis=0\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4.]).astype(np.float32), np.array([5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_1d')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=0,\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4., 5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_1d')", + "summary": "1d" + }, + { + "code": "input = np.array([[1., 2., 3., 4., 5., 6.],\n [7., 8., 9., 10., 11., 12.]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=1\n)\n\nexpected_outputs = [np.array([[1., 2., 3.], [7., 8., 9.]]).astype(np.float32),\n np.array([[4., 5., 6.], [10., 11., 12.]]).astype(np.float32)]\n\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_2d')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=1,\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([[1., 2.], [7., 8.]]).astype(np.float32),\n np.array([[3., 4., 5., 6.], [9., 10., 11., 12.]]).astype(np.float32)]\n\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_2d')", + "summary": "2d" + }, + { + "code": "input = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n 
outputs=['output_1', 'output_2', 'output_3']\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4.]).astype(np.float32), np.array([5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_default_axis')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4., 5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_default_axis')", + "summary": "default_values" + }, + { + "code": "input = np.array([]).astype(np.float32)\n\n# Split emtpy tensor to tensors of size zero\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3'],\n split=[0, 0, 0]\n)\n\nexpected_outputs = [np.array([]).astype(np.float32), np.array([]).astype(np.float32), np.array([]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_zero_size_splits')", + "summary": "zero_size_splits" + } + ], + "inputs": [ + { + "description": "The tensor to split", + "name": "input", + "type": "T" + }, + { + "description": "Optional list of output lengths (see also arg 'split')", + "name": "split", + "option": "optional", + "type": "T" + } + ], + "inputs_range": "1 - 2", + "max_input": 2, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "One or more outputs forming list of tensors after splitting", + "name": "outputs...", + "option": "variadic", + "type": "T" + } + ], + "outputs_range": "1 - ∞", + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float tensors.", + "type_param_str": "T" + } + ] + } + 
}, + { + "name": "Split", + "schema": { + "attributes": [ + { + "description": "Which axis to split on. ", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "length of each output", + "name": "split", + "required": false, + "type": "int64[]" + } + ], + "category": "Tensor", + "description": "Split a tensor into a list of tensors, along the specified\n'axis'. Lengths of the parts can be specified using argument 'split'.\nOtherwise, the tensor is split to equal sized parts.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3'],\n axis=0\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4.]).astype(np.float32), np.array([5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_1d')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=0,\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4., 5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_1d')", + "summary": "1d" + }, + { + "code": "input = np.array([[1., 2., 3., 4., 5., 6.],\n [7., 8., 9., 10., 11., 12.]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=1\n)\n\nexpected_outputs = [np.array([[1., 2., 3.], [7., 8., 9.]]).astype(np.float32),\n np.array([[4., 5., 6.], [10., 11., 12.]]).astype(np.float32)]\n\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_2d')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=1,\n split=[2, 4]\n)\n\nexpected_outputs = 
[np.array([[1., 2.], [7., 8.]]).astype(np.float32),\n np.array([[3., 4., 5., 6.], [9., 10., 11., 12.]]).astype(np.float32)]\n\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_2d')", + "summary": "2d" + }, + { + "code": "input = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3']\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4.]).astype(np.float32), np.array([5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_default_axis')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4., 5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_default_axis')", + "summary": "default_values" + }, + { + "code": "input = np.array([]).astype(np.float32)\n\n# Split emtpy tensor to tensors of size zero\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3'],\n split=[0, 0, 0]\n)\n\nexpected_outputs = [np.array([]).astype(np.float32), np.array([]).astype(np.float32), np.array([]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_zero_size_splits')", + "summary": "zero_size_splits" + } + ], + "inputs": [ + { + "description": "The tensor to split", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "One or more outputs forming list of tensors after splitting", + "name": "outputs", + "option": "variadic", + "type": "T" + } + ], 
+ "outputs_range": "1 - ∞", + "since_version": 2, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Split", + "schema": { + "attributes": [ + { + "description": "Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1] where r = rank(input).", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "length of each output. Values should be >= 0.", + "name": "split", + "required": false, + "type": "int64[]" + } + ], + "category": "Tensor", + "description": "Split a tensor into a list of tensors, along the specified\n'axis'. 
Lengths of the parts can be specified using argument 'split'.\nOtherwise, the tensor is split to equal sized parts.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3'],\n axis=0\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4.]).astype(np.float32), np.array([5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_1d')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=0,\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4., 5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_1d')", + "summary": "1d" + }, + { + "code": "input = np.array([[1., 2., 3., 4., 5., 6.],\n [7., 8., 9., 10., 11., 12.]]).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=1\n)\n\nexpected_outputs = [np.array([[1., 2., 3.], [7., 8., 9.]]).astype(np.float32),\n np.array([[4., 5., 6.], [10., 11., 12.]]).astype(np.float32)]\n\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_2d')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n axis=1,\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([[1., 2.], [7., 8.]]).astype(np.float32),\n np.array([[3., 4., 5., 6.], [9., 10., 11., 12.]]).astype(np.float32)]\n\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_2d')", + "summary": "2d" + }, + { + "code": "input = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32)\n\n# If axis is not specified, split is applied on default axis 0\nnode 
= onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3']\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4.]).astype(np.float32), np.array([5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_equal_parts_default_axis')\n\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2'],\n split=[2, 4]\n)\n\nexpected_outputs = [np.array([1., 2.]).astype(np.float32), np.array([3., 4., 5., 6.]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_variable_parts_default_axis')", + "summary": "default_values" + }, + { + "code": "input = np.array([]).astype(np.float32)\n\n# Split emtpy tensor to tensors of size zero\nnode = onnx.helper.make_node(\n 'Split',\n inputs=['input'],\n outputs=['output_1', 'output_2', 'output_3'],\n split=[0, 0, 0]\n)\n\nexpected_outputs = [np.array([]).astype(np.float32), np.array([]).astype(np.float32), np.array([]).astype(np.float32)]\nexpect(node, inputs=[input], outputs=[y for y in expected_outputs], name='test_split_zero_size_splits')", + "summary": "zero_size_splits" + } + ], + "inputs": [ + { + "description": "The tensor to split", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2147483647, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "One or more outputs forming list of tensors after splitting", + "name": "outputs", + "option": "variadic", + "type": "T" + } + ], + "outputs_range": "1 - ∞", + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + 
"tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "SplitToSequence", + "schema": { + "attributes": [ + { + "description": "Which axis to split on. A negative value means counting dimensions from the back. Accepted range is [-rank, rank-1].", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Keep the split dimension or not. Default 1, which means we keep split dimension. If input 'split' is specified, this attribute is ignored.", + "name": "keepdims", + "required": false, + "type": "int64" + } + ], + "description": "Split a tensor into a sequence of tensors, along the specified\n'axis'. Lengths of the parts can be specified using argument 'split'.\n'split' must contain only positive numbers.\n'split' is either a scalar (tensor of empty shape), or a 1-D tensor.\nIf 'split' is a scalar, then 'input' will be split into equally sized chunks(if possible).\nLast chunk will be smaller if the 'input' size along the given axis 'axis' is not divisible\nby 'split'.\nOtherwise, the tensor is split into 'size(split)' chunks, with lengths of the parts on 'axis'\nspecified in 'split'. In this scenario, the sum of entries in 'split' must be equal to the\ndimension size of input tensor on 'axis'.\n", + "domain": "ai.onnx", + "inputs": [ + { + "description": "The tensor to split", + "name": "input", + "type": "T" + }, + { + "description": "Length of each output. It can be either a scalar(tensor of empty shape), or a 1-D tensor. All values must be >= 0. 
", + "name": "split", + "option": "optional", + "type": "I" + } + ], + "inputs_range": "1 - 2", + "max_input": 2, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "One or more outputs forming a sequence of tensors after splitting", + "name": "output_sequence", + "type": "S" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input types to all tensor types.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int32)", + "tensor(int64)" + ], + "description": "Constrain split size to integral tensor.", + "type_param_str": "I" + }, + { + "allowed_type_strs": [ + "seq(tensor(uint8))", + "seq(tensor(uint16))", + "seq(tensor(uint32))", + "seq(tensor(uint64))", + "seq(tensor(int8))", + "seq(tensor(int16))", + "seq(tensor(int32))", + "seq(tensor(int64))", + "seq(tensor(float16))", + "seq(tensor(float))", + "seq(tensor(double))", + "seq(tensor(string))", + "seq(tensor(bool))", + "seq(tensor(complex64))", + "seq(tensor(complex128))" + ], + "description": "Constrain output types to all tensor types.", + "type_param_str": "S" + } + ] + } + }, + { + "name": "Sqrt", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Square root takes one input data (Tensor) and produces one output data\n(Tensor) where the square root is, y = x^0.5, is applied to\nthe tensor elementwise. 
If x is negative, then it will return NaN.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sqrt',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([1, 4, 9]).astype(np.float32)\ny = np.sqrt(x) # expected output [1., 2., 3.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_sqrt_example')\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.sqrt(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_sqrt')", + "summary": "sqrt" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sqrt", + "schema": { + "description": "Square root takes one input data (Tensor) and produces one output data\n(Tensor) where the square root is, y = x^0.5, is applied to\nthe tensor elementwise. 
If x is negative, then it will return NaN.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sqrt',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([1, 4, 9]).astype(np.float32)\ny = np.sqrt(x) # expected output [1., 2., 3.]\nexpect(node, inputs=[x], outputs=[y],\n name='test_sqrt_example')\n\nx = np.abs(np.random.randn(3, 4, 5).astype(np.float32))\ny = np.sqrt(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_sqrt')", + "summary": "sqrt" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Squeeze", + "schema": { + "attributes": [ + { + "description": "List of non-negative integers, indicate the dimensions to squeeze.", + "name": "axes", + "required": false, + "type": "int64[]" + } + ], + "category": "Transform", + "description": "Remove single-dimensional entries from the shape of a tensor.\nTakes a parameter `axes` with a list of axes to squeeze.\nIf `axes` is not provided, all the single dimensions will be removed from\nthe shape. 
If an axis is selected with shape entry not equal to one, an error is raised.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Squeeze',\n inputs=['x'],\n outputs=['y'],\n axes=[0],\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\ny = np.squeeze(x, axis=0)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_squeeze')", + "summary": "squeeze" + }, + { + "code": "node = onnx.helper.make_node(\n 'Squeeze',\n inputs=['x'],\n outputs=['y'],\n axes=[-2],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\ny = np.squeeze(x, axis=-2)\nexpect(node, inputs=[x], outputs=[y],\n name='test_squeeze_negative_axes')", + "summary": "squeeze_negative_axes" + } + ], + "inputs": [ + { + "description": "Tensors with at least max(dims) dimensions.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "squeezed", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Squeeze", + "schema": { + "attributes": [ + { + "description": "List of integers indicating the dimensions to squeeze. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(data).", + "name": "axes", + "required": false, + "type": "int64[]" + } + ], + "category": "Transform", + "description": "Remove single-dimensional entries from the shape of a tensor.\nTakes a parameter `axes` with a list of axes to squeeze.\nIf `axes` is not provided, all the single dimensions will be removed from\nthe shape. If an axis is selected with shape entry not equal to one, an error is raised.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Squeeze',\n inputs=['x'],\n outputs=['y'],\n axes=[0],\n)\nx = np.random.randn(1, 3, 4, 5).astype(np.float32)\ny = np.squeeze(x, axis=0)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_squeeze')", + "summary": "squeeze" + }, + { + "code": "node = onnx.helper.make_node(\n 'Squeeze',\n inputs=['x'],\n outputs=['y'],\n axes=[-2],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\ny = np.squeeze(x, axis=-2)\nexpect(node, inputs=[x], outputs=[y],\n name='test_squeeze_negative_axes')", + "summary": "squeeze_negative_axes" + } + ], + "inputs": [ + { + "description": "Tensors with at least max(dims) dimensions.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "squeezed", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "StringNormalizer", + "schema": { + "attributes": [ + { + 
"default": "NONE", + "description": "string enum that cases output to be lowercased/uppercases/unchanged. Valid values are \"LOWER\", \"UPPER\", \"NONE\". Default is \"NONE\"", + "name": "case_change_action", + "required": false, + "type": "string" + }, + { + "description": "Boolean. Whether the identification of stop words in X is case-sensitive. Default is false", + "name": "is_case_sensitive", + "required": false, + "type": "int64" + }, + { + "description": "Environment dependent string that denotes the locale according to which output strings needs to be upper/lowercased.Default en_US or platform specific equivalent as decided by the implementation.", + "name": "locale", + "required": false, + "type": "string" + }, + { + "description": "List of stop words. If not set, no word would be removed from X.", + "name": "stopwords", + "required": false, + "type": "string[]" + } + ], + "description": "StringNormalization performs string operations for basic cleaning.\nThis operator has only one input (denoted by X) and only one output\n(denoted by Y). 
This operator first examines the elements in the X,\nand removes elements specified in \"stopwords\" attribute.\nAfter removing stop words, the intermediate result can be further lowercased,\nuppercased, or just returned depending the \"case_change_action\" attribute.\nThis operator only accepts [C]- and [1, C]-tensor.\nIf all elements in X are dropped, the output will be the empty value of string tensor with shape [1]\nif input shape is [C] and shape [1, 1] if input shape is [1, C].\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)\noutput = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)\nstopwords = [u'monday']\n\nnode = onnx.helper.make_node(\n 'StringNormalizer',\n inputs=['x'],\n outputs=['y'],\n case_change_action='LOWER',\n is_case_sensitive=1,\n stopwords=stopwords\n)\nexpect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_casesensintive_lower')", + "summary": "monday_casesensintive_lower" + }, + { + "code": "input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)\noutput = np.array([u'tuesday', u'wednesday', u'thursday']).astype(np.object)\nstopwords = [u'monday']\n\nnode = onnx.helper.make_node(\n 'StringNormalizer',\n inputs=['x'],\n outputs=['y'],\n is_case_sensitive=1,\n stopwords=stopwords\n)\nexpect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_casesensintive_nochangecase')", + "summary": "monday_casesensintive_nochangecase" + }, + { + "code": "input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(np.object)\noutput = np.array([u'TUESDAY', u'WEDNESDAY', u'THURSDAY']).astype(np.object)\nstopwords = [u'monday']\n\nnode = onnx.helper.make_node(\n 'StringNormalizer',\n inputs=['x'],\n outputs=['y'],\n case_change_action='UPPER',\n is_case_sensitive=1,\n stopwords=stopwords\n)\nexpect(node, inputs=[input], outputs=[output], 
name='test_strnormalizer_export_monday_casesensintive_upper')", + "summary": "monday_casesensintive_upper" + }, + { + "code": "input = np.array([u'monday', u'monday']).astype(np.object)\noutput = np.array([u'']).astype(np.object)\nstopwords = [u'monday']\n\nnode = onnx.helper.make_node(\n 'StringNormalizer',\n inputs=['x'],\n outputs=['y'],\n case_change_action='UPPER',\n is_case_sensitive=1,\n stopwords=stopwords\n)\nexpect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_empty_output')", + "summary": "monday_empty_output" + }, + { + "code": "input = np.array([u'Monday', u'tuesday', u'wednesday', u'Monday', u'tuesday', u'wednesday']).astype(np.object).reshape([1, 6])\n\n# It does upper case cecedille, accented E\n# and german umlaut but fails\n# with german eszett\noutput = np.array([u'TUESDAY', u'WEDNESDAY', u'TUESDAY', u'WEDNESDAY']).astype(np.object).reshape([1, 4])\nstopwords = [u'monday']\n\nnode = onnx.helper.make_node(\n 'StringNormalizer',\n inputs=['x'],\n outputs=['y'],\n case_change_action='UPPER',\n stopwords=stopwords\n)\nexpect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_insensintive_upper_twodim')", + "summary": "monday_insensintive_upper_twodim" + }, + { + "code": "input = np.array([u'monday', u'tuesday']).astype(np.object)\noutput = input\n\n# No stopwords. 
This is a NOOP\nnode = onnx.helper.make_node(\n 'StringNormalizer',\n inputs=['x'],\n outputs=['y'],\n is_case_sensitive=1,\n)\nexpect(node, inputs=[input], outputs=[output], name='test_strnormalizer_nostopwords_nochangecase')", + "summary": "nostopwords_nochangecase" + } + ], + "inputs": [ + { + "description": "UTF-8 strings to normalize", + "name": "X", + "type": "tensor(string)" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "UTF-8 Normalized strings", + "name": "Y", + "type": "tensor(string)" + } + ], + "since_version": 10, + "support_level": "common" + } + }, + { + "name": "Sub", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + }, + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Performs element-wise binary subtraction (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. 
B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub')", + "summary": "sub" + }, + { + "code": "node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub_bcast')", + "summary": "sub_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sub", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions. See doc for details.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Pass 1 to enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "description": "Performs element-wise binary subtraction (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of element size 1 (including a scalar tensor and any\ntensor with rank equal to or smaller than the first tensor), or having its\nshape as a contiguous subset of the first tensor's shape. The starting of the\nmutually equal shape is specified by the argument \"axis\", and if it is not set,\nsuffix matching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (1, 1), i.e. 
B is an 1-element tensor\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n\nAttribute `broadcast=1` needs to be passed to enable broadcasting.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub')", + "summary": "sub" + }, + { + "code": "node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub_bcast')", + "summary": "sub_broadcast" + } + ], + "inputs": [ + { + "description": "First operand, should share the type with the second operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand. With broadcasting can be of smaller size than A. 
If broadcasting is disabled it should be of the same size.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same dimensions and type as A", + "name": "C", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sub", + "schema": { + "description": "Performs element-wise binary subtraction (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.array([1, 2, 3]).astype(np.float32)\ny = np.array([3, 2, 1]).astype(np.float32)\nz = x - y # expected output [-2., 0., 2.]\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(3, 4, 5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub')", + "summary": "sub" + }, + { + "code": "node = onnx.helper.make_node(\n 'Sub',\n inputs=['x', 'y'],\n outputs=['z'],\n)\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.random.randn(5).astype(np.float32)\nz = x - y\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_sub_bcast')", + "summary": "sub_broadcast" + } + ], + "inputs": [ + { + "description": "First operand.", + "name": "A", + "type": "T" + }, + { + "description": "Second operand.", + "name": "B", + "type": "T" + } + ], + 
"max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result, has same element type as two inputs", + "name": "C", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to high-precision numeric tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sum", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "description": "Element-wise sum of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([6, 9, 12]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_sum_example')\n\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_sum_one_input')\n\nresult = np.add(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_sum_two_inputs')", + "summary": "sum" + } + ], + "inputs": [ + { + "description": "List of tensors for Sum.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + 
"outputs": [ + { + "description": "Output tensor. Same dimension as inputs.", + "name": "sum", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sum", + "schema": { + "description": "Element-wise sum of each of the input tensors. All inputs and outputs must\nhave the same shape and data type.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([6, 9, 12]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_sum_example')\n\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_sum_one_input')\n\nresult = np.add(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_sum_two_inputs')", + "summary": "sum" + } + ], + "inputs": [ + { + "description": "List of tensors for Sum.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor. 
Same dimension as inputs.", + "name": "sum", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Sum", + "schema": { + "description": "Element-wise sum of each of the input tensors (with Numpy-style broadcasting support).\nAll inputs and outputs must have the same data type.\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "data_0 = np.array([3, 0, 2]).astype(np.float32)\ndata_1 = np.array([1, 3, 4]).astype(np.float32)\ndata_2 = np.array([2, 6, 6]).astype(np.float32)\nresult = np.array([6, 9, 12]).astype(np.float32)\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_sum_example')\n\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0], outputs=[data_0],\n name='test_sum_one_input')\n\nresult = np.add(data_0, data_1)\nnode = onnx.helper.make_node(\n 'Sum',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n)\nexpect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_sum_two_inputs')", + "summary": "sum" + } + ], + "inputs": [ + { + "description": "List of tensors for sum.", + "name": "data_0", + "option": "variadic", + "type": "T" + } + ], + "inputs_range": "1 - ∞", + "max_input": 2147483647, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor.", + "name": "sum", + "type": "T" + } + ], + "since_version": 8, + "support_level": "common", + "type_constraints": [ 
+ { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Tan", + "schema": { + "description": "Calculates the tangent of the given input tensor, element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Tan',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.tan(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_tan_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.tan(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_tan')", + "summary": "tan" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The tangent of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Tanh", + "schema": { + "attributes": [ + { + "description": "legacy optimization attribute.", + "name": "consumed_inputs", + "required": false, + "type": "int64[]" + } + ], + "category": "Activation", + "description": "Calculates the hyperbolic tangent of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Tanh',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.tanh(x) # expected output [-0.76159418, 0., 0.76159418]\nexpect(node, inputs=[x], outputs=[y],\n name='test_tanh_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = 
np.tanh(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_tanh')", + "summary": "tanh" + } + ], + "inputs": [ + { + "description": "1-D input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The hyperbolic tangent values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Tanh", + "schema": { + "category": "Activation", + "description": "Calculates the hyperbolic tangent of the given input tensor element-wise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Tanh',\n inputs=['x'],\n outputs=['y'],\n)\n\nx = np.array([-1, 0, 1]).astype(np.float32)\ny = np.tanh(x) # expected output [-0.76159418, 0., 0.76159418]\nexpect(node, inputs=[x], outputs=[y],\n name='test_tanh_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.tanh(x)\nexpect(node, inputs=[x], outputs=[y],\n name='test_tanh')", + "summary": "tanh" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "input", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The hyperbolic tangent values of the input tensor computed element-wise", + "name": "output", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "TfIdfVectorizer", + "schema": { + "attributes": [ + 
{ + "description": "Maximum n-gram length. If this value is 3, 3-grams will be used to generate the output.", + "name": "max_gram_length", + "required": true, + "type": "int64" + }, + { + "description": "Maximum number of items (integers/strings) to be skipped when constructing an n-gram from X. If max_skip_count=1, min_gram_length=2, max_gram_length=3, this operator may generate 2-grams with skip_count=0 and skip_count=1, and 3-grams with skip_count=0 and skip_count=1", + "name": "max_skip_count", + "required": true, + "type": "int64" + }, + { + "description": "Minimum n-gram length. If this value is 2 and max_gram_length is 3, output may contain counts of 2-grams and 3-grams.", + "name": "min_gram_length", + "required": true, + "type": "int64" + }, + { + "description": "The weighting criteria. It can be one of \"TF\" (term frequency), \"IDF\" (inverse document frequency), and \"TFIDF\" (the combination of TF and IDF)", + "name": "mode", + "required": true, + "type": "string" + }, + { + "description": "The starting indexes of 1-grams, 2-grams, and so on in pool. It is useful when determining the boundary between two consecutive collections of n-grams. For example, if ngram_counts is [0, 17, 36], the first index (zero-based) of 1-gram/2-gram/3-gram in pool are 0/17/36. This format is essentially identical to CSR (or CSC) sparse matrix format, and we choose to use this due to its popularity.", + "name": "ngram_counts", + "required": true, + "type": "int64[]" + }, + { + "description": "list of int64s (type: AttributeProto::INTS). This list is parallel to the specified 'pool_*' attribute. The i-th element in ngram_indexes indicate the coordinate of the i-th n-gram in the output tensor.", + "name": "ngram_indexes", + "required": true, + "type": "int64[]" + }, + { + "description": "List of int64 n-grams learned from the training set. Either this or pool_strings attributes must be present but not both. 
It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector.", + "name": "pool_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "List of strings n-grams learned from the training set. Either this or pool_int64s attributes must be present but not both. It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector.", + "name": "pool_strings", + "required": false, + "type": "string[]" + }, + { + "description": "list of floats. This attribute stores the weight of each n-gram in pool. The i-th element in weights is the weight of the i-th n-gram in pool. Its length equals to the size of ngram_indexes. By default, weights is an all-one tensor.This attribute is used when mode is \"IDF\" or \"TFIDF\" to scale the associated word counts.", + "name": "weights", + "required": false, + "type": "float32[]" + } + ], + "description": "This transform extracts n-grams from the input sequence and save them as a vector. Input can\nbe either a 1-D or 2-D tensor. For 1-D input, output is the n-gram representation of that input.\nFor 2-D input, the output is also a 2-D tensor whose i-th row is the n-gram representation of the i-th input row.\nMore specifically, if input shape is [C], the corresponding output shape would be [max(ngram_indexes) + 1].\nIf input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor.\n\nIn contrast to standard n-gram extraction, here, the indexes of extracting an n-gram from the original\nsequence are not necessarily consecutive numbers. 
The discontinuity between indexes are controlled by the number of skips.\nIf the number of skips is 2, we should skip two tokens when scanning through the original sequence.\nLet's consider an example. Assume that input sequence is [94, 17, 36, 12, 28] and the number of skips is 2.\nThe associated 2-grams are [94, 12] and [17, 28] respectively indexed by [0, 3] and [1, 4].\nIf the number of skips becomes 0, the 2-grams generated are [94, 17], [17, 36], [36, 12], [12, 28]\nindexed by [0, 1], [1, 2], [2, 3], [3, 4], respectively.\n\nThe output vector (denoted by Y) stores the count of each n-gram;\nY[ngram_indexes[i]] indicates the times that the i-th n-gram is found. The attribute ngram_indexes is used to determine the mapping\nbetween index i and the corresponding n-gram's output coordinate. If pool_int64s is [94, 17, 17, 36], ngram_indexes is [1, 0],\nngram_counts=[0, 0], then the Y[0] (first element in Y) and Y[1] (second element in Y) are the counts of [17, 36] and [94, 17],\nrespectively. An n-gram which cannot be found in pool_strings/pool_int64s should be ignored and has no effect on the output.\nNote that we may consider all skips up to S when generating the n-grams.\n\nThe examples used above are true if mode is \"TF\". If mode is \"IDF\", all the counts larger than 1 would be truncated to 1 and\nthe i-th element in weights would be used to scale (by multiplication) the count of the i-th n-gram in pool. If mode is \"TFIDF\",\nthis operator first computes the counts of all n-grams and then scale them by the associated values in the weights attribute.\n\nOnly one of pool_strings and pool_int64s can be set. 
If pool_int64s is set, the input should be an integer tensor.\nIf pool_strings is set, the input must be a string tensor.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)\noutput = np.array([[0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 0., 1.]]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, # unigrams\n 5, 6, 7, 8, 6, 7]).astype(np.int64) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode='TF',\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=0,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s\n)\nnode = helper.make_node_noweights()\nexpect(node, inputs=[input], outputs=[output], name='test_tfidfvectorizer_tf_batch_onlybigrams_skip0')", + "summary": "tf_batch_onlybigrams_skip0" + }, + { + "code": "input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)\noutput = np.array([[0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 1., 1.]]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, # unigrams\n 5, 6, 7, 8, 6, 7]).astype(np.int64) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode='TF',\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s\n)\nnode = helper.make_node_noweights()\nexpect(node, inputs=[input], outputs=[output], name='test_tfidfvectorizer_tf_batch_onlybigrams_skip5')", + "summary": "tf_batch_onlybigrams_skip5" + }, + { + "code": "input = np.array([[1, 1, 3, 3, 3, 7], [8, 6, 7, 5, 6, 8]]).astype(np.int32)\noutput = np.array([[0., 3., 0., 0., 0., 0., 0.], [0., 0., 1., 0., 1., 1., 1.]]).astype(np.float32)\n\nngram_counts = np.array([0, 
4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, # unigrams\n 5, 6, 7, 8, 6, 7]).astype(np.int64) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode='TF',\n min_gram_length=1,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s\n)\nnode = helper.make_node_noweights()\nexpect(node, inputs=[input], outputs=[output], name='test_tfidfvectorizer_tf_batch_uniandbigrams_skip5')", + "summary": "tf_batch_uniandbigrams_skip5" + }, + { + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([0., 0., 0., 0., 1., 1., 1.]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, # unigrams\n 5, 6, 7, 8, 6, 7]).astype(np.int64) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode='TF',\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=0,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s\n)\nnode = helper.make_node_noweights()\nexpect(node, inputs=[input], outputs=[output], name='test_tfidfvectorizer_tf_only_bigrams_skip0')", + "summary": "tf_only_bigrams_skip0" + }, + { + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([1., 1., 1.]).astype(np.float32)\n\nngram_counts = np.array([0, 0]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2]).astype(np.int64)\npool_int64s = np.array([ # unigrams none\n 5, 6, 7, 8, 6, 7]).astype(np.int64) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode='TF',\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=0,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s\n)\nnode = helper.make_node_noweights()\nexpect(node, inputs=[input], outputs=[output], name='test_tfidfvectorizer_tf_onlybigrams_levelempty')", 
+ "summary": "tf_onlybigrams_levelempty" + }, + { + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([0., 0., 0., 0., 1., 3., 1.]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, # unigrams\n 5, 6, 7, 8, 6, 7]).astype(np.int64) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode='TF',\n min_gram_length=2,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s\n)\nnode = helper.make_node_noweights()\nexpect(node, inputs=[input], outputs=[output], name='test_tfidfvectorizer_tf_onlybigrams_skip5')", + "summary": "tf_onlybigrams_skip5" + }, + { + "code": "input = np.array([1, 1, 3, 3, 3, 7, 8, 6, 7, 5, 6, 8]).astype(np.int32)\noutput = np.array([0., 3., 1., 0., 1., 3., 1.]).astype(np.float32)\n\nngram_counts = np.array([0, 4]).astype(np.int64)\nngram_indexes = np.array([0, 1, 2, 3, 4, 5, 6]).astype(np.int64)\npool_int64s = np.array([2, 3, 5, 4, # unigrams\n 5, 6, 7, 8, 6, 7]).astype(np.int64) # bigrams\n\nhelper = TfIdfVectorizerHelper(\n mode='TF',\n min_gram_length=1,\n max_gram_length=2,\n max_skip_count=5,\n ngram_counts=ngram_counts,\n ngram_indexes=ngram_indexes,\n pool_int64s=pool_int64s\n)\nnode = helper.make_node_noweights()\nexpect(node, inputs=[input], outputs=[output], name='test_tfidfvectorizer_tf_uniandbigrams_skip5')", + "summary": "tf_uniandbigrams_skip5" + } + ], + "inputs": [ + { + "description": "Input for n-gram extraction", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Ngram results", + "name": "Y", + "type": "T1" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int32)", + "tensor(int64)" + ], + "description": 
"Input is ether string UTF-8 or int32/int64", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(float)" + ], + "description": "1-D tensor of floats", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "ThresholdedRelu", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Threshold value", + "name": "alpha", + "required": false, + "type": "float32" + } + ], + "category": "Activation", + "description": "ThresholdedRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = x for x > alpha, y = 0 otherwise,\nis applied to the tensor elementwise.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "default_alpha = 1.0\nnode = onnx.helper.make_node(\n 'ThresholdedRelu',\n inputs=['x'],\n outputs=['y']\n)\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, default_alpha, np.inf)\ny[y == default_alpha] = 0\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_thresholdedrelu_default')", + "summary": "default" + }, + { + "code": "alpha = 2.0\nnode = onnx.helper.make_node(\n 'ThresholdedRelu',\n inputs=['x'],\n outputs=['y'],\n alpha=alpha\n)\n\nx = np.array([-1.5, 0., 1.2, 2.0, 2.2]).astype(np.float32)\ny = np.clip(x, alpha, np.inf) # expected output [0., 0., 0., 0., 2.2]\ny[y == alpha] = 0\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_thresholdedrelu_example')\n\nx = np.random.randn(3, 4, 5).astype(np.float32)\ny = np.clip(x, alpha, np.inf)\ny[y == alpha] = 0\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_thresholdedrelu')", + "summary": "thresholdedrelu" + } + ], + "inputs": [ + { + "description": "Input tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor", + "name": "Y", + "type": "T" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + 
"tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Tile", + "schema": { + "category": "Shape", + "description": "Repeat the elements of a tensor along an axis.", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Tile',\n inputs=['x', 'y'],\n outputs=['z']\n)\n\nx = np.random.rand(2, 3, 4, 5).astype(np.float32)\n\nrepeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)\n\nz = np.tile(x, repeats)\n\nexpect(node,\n inputs=[x, repeats],\n outputs=[z],\n name='test_tile')", + "summary": "tile" + }, + { + "code": "node = onnx.helper.make_node(\n 'Tile',\n inputs=['x', 'y'],\n outputs=['z']\n)\n\nx = np.array([\n [0, 1],\n [2, 3]\n], dtype=np.float32)\n\nrepeats = np.array([2, 2], dtype=np.int64)\n\nz = np.array([\n [0, 1, 0, 1],\n [2, 3, 2, 3],\n [0, 1, 0, 1],\n [2, 3, 2, 3]\n], dtype=np.float32)\n\nexpect(node,\n inputs=[x, repeats],\n outputs=[z],\n name='test_tile_precomputed')", + "summary": "tile_precomputed" + } + ], + "inputs": [ + { + "description": "Input tensor of any shape.", + "name": "input", + "type": "T" + }, + { + "description": "Number of repeated copies to make of the input tensor.", + "name": "tiles", + "type": "T" + }, + { + "description": "Axis along which to repeat.", + "name": "axis", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of same shape and type as input.", + "name": "output", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain tiles and axis's type to int64 
tensors.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Tile", + "schema": { + "category": "Shape", + "description": "Constructs a tensor by tiling a given tensor.\nThis is the same as function `tile` in Numpy, but no broadcast.\nFor example A = [[1, 2], [3, 4]], B = [1, 2], tile(A, B) = [[1, 2, 1, 2], [3, 4, 3, 4]]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Tile',\n inputs=['x', 'y'],\n outputs=['z']\n)\n\nx = np.random.rand(2, 3, 4, 5).astype(np.float32)\n\nrepeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)\n\nz = np.tile(x, repeats)\n\nexpect(node,\n inputs=[x, repeats],\n outputs=[z],\n name='test_tile')", + "summary": "tile" + }, + { + "code": "node = onnx.helper.make_node(\n 'Tile',\n inputs=['x', 'y'],\n outputs=['z']\n)\n\nx = np.array([\n [0, 1],\n [2, 3]\n], dtype=np.float32)\n\nrepeats = np.array([2, 2], dtype=np.int64)\n\nz = np.array([\n [0, 1, 0, 1],\n [2, 3, 2, 3],\n [0, 1, 0, 1],\n [2, 3, 2, 3]\n], dtype=np.float32)\n\nexpect(node,\n inputs=[x, repeats],\n outputs=[z],\n name='test_tile_precomputed')", + "summary": "tile_precomputed" + } + ], + "inputs": [ + { + "description": "Input tensor of any shape.", + "name": "input", + "type": "T" + }, + { + "description": "1D int64 tensor of the same length as input's dimension number, includes numbers of repeated copies along input's dimensions.", + "name": "repeats", + "type": "T1" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Output tensor of the same dimension and type as tensor input. 
output_dim[i] = input_dim[i] * repeats[i]", + "name": "output", + "type": "T" + } + ], + "since_version": 6, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain repeat's type to int64 tensors.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "TopK", + "schema": { + "attributes": [ + { + "default": -1, + "description": "Dimension on which to do the sort.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Number of top elements to retrieve", + "name": "k", + "required": true, + "type": "int64" + } + ], + "description": "Retrieve the top-K elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the top k elements along the specified axis\n -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\nGiven two equivalent values, this operator uses the indices along the axis as\n a tiebreaker. 
That is, the element with the lower index will appear first.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axis = 1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis\n)\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n#print(values_ref)\n#[[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n#print(indices_ref)\n#[[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k')", + "summary": "top_k" + }, + { + "code": "axis = -1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis\n)\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n#[[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n#[[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k_negative_axis')", + "summary": "top_k_negative_axis" + }, + { + "code": "axis = 1\nlargest = 0\nsorted = 1\nk = 3\n\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis,\n largest=largest,\n sorted=sorted\n)\n\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [11, 10, 9, 8],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n#print(values_ref)\n#[[ 0. 1. 2.]\n# [ 4. 5. 6.]\n# [ 8. 9. 
10.]]\n#print(indices_ref)\n#[[0 1 2]\n# [0 1 2]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k_smallest')", + "summary": "top_k_smallest" + } + ], + "inputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 2, + "outputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing top K values from the input tensor", + "name": "Values", + "type": "T" + }, + { + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing the corresponding input tensor indices for the top K values.", + "name": "Indices", + "type": "I" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "I" + } + ] + } + }, + { + "name": "TopK", + "schema": { + "attributes": [ + { + "default": -1, + "description": "Dimension on which to do the sort.", + "name": "axis", + "required": false, + "type": "int64" + } + ], + "description": "Retrieve the top-K elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the top k elements along the specified axis\n -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\n \nGiven two equivalent values, this operator uses the indices along the axis as\n a tiebreaker. 
That is, the element with the lower index will appear first.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axis = 1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis\n)\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n#print(values_ref)\n#[[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n#print(indices_ref)\n#[[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k')", + "summary": "top_k" + }, + { + "code": "axis = -1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis\n)\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n#[[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n#[[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k_negative_axis')", + "summary": "top_k_negative_axis" + }, + { + "code": "axis = 1\nlargest = 0\nsorted = 1\nk = 3\n\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis,\n largest=largest,\n sorted=sorted\n)\n\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [11, 10, 9, 8],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n#print(values_ref)\n#[[ 0. 1. 2.]\n# [ 4. 5. 6.]\n# [ 8. 9. 
10.]]\n#print(indices_ref)\n#[[0 1 2]\n# [0 1 2]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k_smallest')", + "summary": "top_k_smallest" + } + ], + "inputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]", + "name": "X", + "type": "T" + }, + { + "description": "A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve", + "name": "K", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 2, + "min_input": 2, + "min_output": 2, + "outputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing top K values from the input tensor", + "name": "Values", + "type": "T" + }, + { + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing the corresponding input tensor indices for the top K values.", + "name": "Indices", + "type": "I" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to float tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "I" + } + ] + } + }, + { + "name": "TopK", + "schema": { + "attributes": [ + { + "default": -1, + "description": "Dimension on which to do the sort. Negative value means counting dimensions from the back. 
Accepted range is [-r, r-1] where r = rank(input).", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Whether to return the top-K largest or smallest elements.", + "name": "largest", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "Whether to return the elements in sorted order.", + "name": "sorted", + "required": false, + "type": "int64" + } + ], + "description": "Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of\nshape [a_1, a_2, ..., a_n, r] and integer argument k, return two outputs:\n -Value tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n]\n which contains the values of the top k elements along the specified axis\n -Index tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] which\n contains the indices of the top k elements (original indices from the input\n tensor).\n\nIf \"largest\" is 1 (the default value) then the k largest elements are returned.\nIf \"sorted\" is 1 (the default value) then the resulting k elements will be sorted.\nIf \"sorted\" is 0, order of returned 'Values' and 'Indices' are undefined.\n\nGiven two equivalent values, this operator uses the indices along the axis as\n a tiebreaker. That is, the element with the lower index will appear first.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "axis = 1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis\n)\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n#print(values_ref)\n#[[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 
9.]]\n#print(indices_ref)\n#[[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k')", + "summary": "top_k" + }, + { + "code": "axis = -1\nlargest = 1\n\nk = 3\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis\n)\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [8, 9, 10, 11],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n# print(values_ref)\n#[[ 3. 2. 1.]\n# [ 7. 6. 5.]\n# [11. 10. 9.]]\n# print(indices_ref)\n#[[3 2 1]\n# [3 2 1]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k_negative_axis')", + "summary": "top_k_negative_axis" + }, + { + "code": "axis = 1\nlargest = 0\nsorted = 1\nk = 3\n\nnode = onnx.helper.make_node(\n 'TopK',\n inputs=['x', 'k'],\n outputs=['values', 'indices'],\n axis=axis,\n largest=largest,\n sorted=sorted\n)\n\nX = np.array([\n [0, 1, 2, 3],\n [4, 5, 6, 7],\n [11, 10, 9, 8],\n], dtype=np.float32)\nK = np.array([k], dtype=np.int64)\nvalues_ref, indices_ref = topk_sorted_implementation(X, k, axis, largest)\n\n#print(values_ref)\n#[[ 0. 1. 2.]\n# [ 4. 5. 6.]\n# [ 8. 9. 10.]]\n#print(indices_ref)\n#[[0 1 2]\n# [0 1 2]\n# [3 2 1]]\n\nexpect(node, inputs=[X, K], outputs=[values_ref, indices_ref],\n name='test_top_k_smallest')", + "summary": "top_k_smallest" + } + ], + "inputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_n, r]", + "name": "X", + "type": "T" + }, + { + "description": "A 1-D tensor containing a single positive value corresponding to the number of top elements to retrieve", + "name": "K", + "type": "tensor(int64)" + } + ], + "max_input": 2, + "max_output": 2, + "min_input": 2, + "min_output": 2, + "outputs": [ + { + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... 
a_n] containing top K values from the input tensor", + "name": "Values", + "type": "T" + }, + { + "description": "Tensor of shape [a_1, a_2, ..., a_{axis-1}, k, a_{axis+1}, ... a_n] containing the corresponding input tensor indices for the top K values.", + "name": "Indices", + "type": "I" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain input and output types to numeric tensors.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(int64)" + ], + "description": "Constrain index tensor to int64", + "type_param_str": "I" + } + ] + } + }, + { + "name": "Transpose", + "schema": { + "attributes": [ + { + "description": "A list of integers. By default, reverse the dimensions, otherwise permute the axes according to the values given.", + "name": "perm", + "required": false, + "type": "int64[]" + } + ], + "category": "Transform", + "description": "Transpose the input tensor similar to numpy.transpose. 
For example, when\nperm=(1, 0, 2), given an input tensor of shape (1, 2, 3), the output shape\nwill be (2, 1, 3).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\npermutations = list(itertools.permutations(np.arange(len(shape))))\n\nfor i in range(len(permutations)):\n node = onnx.helper.make_node(\n 'Transpose',\n inputs=['data'],\n outputs=['transposed'],\n perm=permutations[i]\n )\n transposed = np.transpose(data, permutations[i])\n expect(node, inputs=[data], outputs=[transposed],\n name='test_transpose_all_permutations_' + str(i))", + "summary": "all_permutations" + }, + { + "code": "shape = (2, 3, 4)\ndata = np.random.random_sample(shape).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Transpose',\n inputs=['data'],\n outputs=['transposed']\n)\n\ntransposed = np.transpose(data)\nexpect(node, inputs=[data], outputs=[transposed],\n name='test_transpose_default')", + "summary": "default" + } + ], + "inputs": [ + { + "description": "An input tensor.", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Transposed output.", + "name": "transposed", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "TreeEnsembleClassifier", + "schema": { + "attributes": [ + { + "description": "Base values for classification, added to final class score; the size must be the same as the classes 
or can be left unassigned (assumed 0)", + "name": "base_values", + "required": false, + "type": "float32[]" + }, + { + "description": "The index of the class list that each weight is for.", + "name": "class_ids", + "required": false, + "type": "int64[]" + }, + { + "description": "node id that this weight is for.", + "name": "class_nodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "The id of the tree that this node is in.", + "name": "class_treeids", + "required": false, + "type": "int64[]" + }, + { + "description": "The weight for the class in class_id.", + "name": "class_weights", + "required": false, + "type": "float32[]" + }, + { + "description": "Class labels if using integer labels.
    One and only one of the 'classlabels_*' attributes must be defined.", + "name": "classlabels_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "Class labels if using string labels.
    One and only one of the 'classlabels_*' attributes must be defined.", + "name": "classlabels_strings", + "required": false, + "type": "string[]" + }, + { + "description": "Child node if expression is false.", + "name": "nodes_falsenodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Feature id for each node.", + "name": "nodes_featureids", + "required": false, + "type": "int64[]" + }, + { + "description": "Popularity of each node, used for performance and may be omitted.", + "name": "nodes_hitrates", + "required": false, + "type": "float32[]" + }, + { + "description": "For each node, define what to do in the presence of a missing value: if a value is missing (NaN), use the 'true' or 'false' branch based on the value in this array.
    This attribute may be left undefined, and the default value is false (0) for all nodes.", + "name": "nodes_missing_value_tracks_true", + "required": false, + "type": "int64[]" + }, + { + "description": "The node kind, that is, the comparison to make at the node. There is no comparison to make at a leaf node.
    One of 'BRANCH_LEQ', 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF'", + "name": "nodes_modes", + "required": false, + "type": "string[]" + }, + { + "description": "Node id for each node. Ids may restart at zero for each tree, but it is not required to.", + "name": "nodes_nodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Tree id for each node.", + "name": "nodes_treeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Child node if expression is true.", + "name": "nodes_truenodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Thresholds to do the splitting on for each node.", + "name": "nodes_values", + "required": false, + "type": "float32[]" + }, + { + "default": "NONE", + "description": "Indicates the transform to apply to the score.
    One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT.'", + "name": "post_transform", + "required": false, + "type": "string" + } + ], + "description": "Tree Ensemble classifier. Returns the top class for each of N inputs.
    \n The attributes named 'nodes_X' form a sequence of tuples, associated by \n index into the sequences, which must all be of equal length. These tuples\n define the nodes.
    \n Similarly, all fields prefixed with 'class_' are tuples of votes at the leaves.\n A leaf may have multiple votes, where each vote is weighted by\n the associated class_weights index.
    \n One and only one of classlabels_strings or classlabels_int64s\n will be defined. The class_ids are indices into this list.\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Input of shape [N,F]", + "name": "X", + "type": "T1" + } + ], + "max_input": 1, + "max_output": 2, + "min_input": 1, + "min_output": 2, + "outputs": [ + { + "description": "N, Top class for each point", + "name": "Y", + "type": "T2" + }, + { + "description": "The class score for each class, for each point, a tensor of shape [N,E].", + "name": "Z", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T1" + }, + { + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)" + ], + "description": "The output type will be a tensor of strings or integers, depending on which of the classlabels_* attributes is used.", + "type_param_str": "T2" + } + ] + } + }, + { + "name": "TreeEnsembleRegressor", + "schema": { + "attributes": [ + { + "default": "SUM", + "description": "Defines how to aggregate leaf values within a target.
    One of 'AVERAGE,' 'SUM,' 'MIN,' 'MAX.'", + "name": "aggregate_function", + "required": false, + "type": "string" + }, + { + "description": "Base values for classification, added to final class score; the size must be the same as the classes or can be left unassigned (assumed 0)", + "name": "base_values", + "required": false, + "type": "float32[]" + }, + { + "description": "The total number of targets.", + "name": "n_targets", + "required": false, + "type": "int64" + }, + { + "description": "Child node if expression is false", + "name": "nodes_falsenodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Feature id for each node.", + "name": "nodes_featureids", + "required": false, + "type": "int64[]" + }, + { + "description": "Popularity of each node, used for performance and may be omitted.", + "name": "nodes_hitrates", + "required": false, + "type": "float32[]" + }, + { + "description": "For each node, define what to do in the presence of a NaN: use the 'true' (if the attribute value is 1) or 'false' (if the attribute value is 0) branch based on the value in this array.
    This attribute may be left undefined and the default value is false (0) for all nodes.", + "name": "nodes_missing_value_tracks_true", + "required": false, + "type": "int64[]" + }, + { + "description": "The node kind, that is, the comparison to make at the node. There is no comparison to make at a leaf node.
    One of 'BRANCH_LEQ', 'BRANCH_LT', 'BRANCH_GTE', 'BRANCH_GT', 'BRANCH_EQ', 'BRANCH_NEQ', 'LEAF'", + "name": "nodes_modes", + "required": false, + "type": "string[]" + }, + { + "description": "Node id for each node. Node ids must restart at zero for each tree and increase sequentially.", + "name": "nodes_nodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Tree id for each node.", + "name": "nodes_treeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Child node if expression is true", + "name": "nodes_truenodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "Thresholds to do the splitting on for each node.", + "name": "nodes_values", + "required": false, + "type": "float32[]" + }, + { + "default": "NONE", + "description": "Indicates the transform to apply to the score.
    One of 'NONE,' 'SOFTMAX,' 'LOGISTIC,' 'SOFTMAX_ZERO,' or 'PROBIT'", + "name": "post_transform", + "required": false, + "type": "string" + }, + { + "description": "The index of the target that each weight is for", + "name": "target_ids", + "required": false, + "type": "int64[]" + }, + { + "description": "The node id of each weight", + "name": "target_nodeids", + "required": false, + "type": "int64[]" + }, + { + "description": "The id of the tree that each node is in.", + "name": "target_treeids", + "required": false, + "type": "int64[]" + }, + { + "description": "The weight for each target", + "name": "target_weights", + "required": false, + "type": "float32[]" + } + ], + "description": "Tree Ensemble regressor. Returns the regressed values for each input in N.
    \n All args with nodes_ are fields of a tuple of tree nodes, and\n it is assumed they are the same length, and an index i will decode the\n tuple across these inputs. Each node id can appear only once\n for each tree id.
    \n All fields prefixed with target_ are tuples of votes at the leaves.
    \n A leaf may have multiple votes, where each vote is weighted by\n the associated target_weights index.
    \n All trees must have their node ids start at 0 and increment by 1.
    \n Mode enum is BRANCH_LEQ, BRANCH_LT, BRANCH_GTE, BRANCH_GT, BRANCH_EQ, BRANCH_NEQ, LEAF\n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "Input of shape [N,F]", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "N classes", + "name": "Y", + "type": "tensor(float)" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(float)", + "tensor(double)", + "tensor(int64)", + "tensor(int32)" + ], + "description": "The input type must be a tensor of a numeric type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Unique", + "schema": { + "attributes": [ + { + "description": "(Optional) The dimension to apply unique. If not specified, the unique elements of the flattened input are returned. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input).", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "default": 1, + "description": "(Optional) Whether to sort the unique elements in ascending order before returning as output. Must be one of 0, or 1 (default).", + "name": "sorted", + "required": false, + "type": "int64" + } + ], + "description": "Find the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned. \nOtherwise the input tensor is flattened and unique values of the flattened tensor are returned. \n\nThis operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs. \nThe first output tensor 'Y' contains all unique values or subtensors of the input. \nThe second optional output tensor 'indices' contains indices of 'Y' elements' first occurance in 'X'.. \nThe third optional output tensor 'inverse_indices' contains, for elements of 'X', its corresponding indices in 'Y'. \". 
\nThe fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input. \n\nOutputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input. \n\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n\nExample 1:\n input_X = [2, 1, 1, 3, 4, 3]\n attribute_sorted = 0\n attribute_axis = None\n output_Y = [2, 1, 3, 4]\n output_indices = [0, 1, 3, 4]\n output_inverse_indices = [0, 1, 1, 2, 3, 2]\n output_counts = [1, 2, 2, 1]\n\nExample 2:\n input_X = [[1, 3], [2, 3]]\n attribute_sorted = 1\n attribute_axis = None\n output_Y = [1, 2, 3]\n output_indices = [0, 2, 1]\n output_inverse_indices = [0, 2, 1, 2]\n output_counts = [1, 1, 2]\n\nExample 3:\n input_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]\n attribute_sorted = 1\n attribute_axis = 0\n output_Y = [[1, 0, 0], [2, 3, 4]]\n output_indices = [0, 2]\n output_inverse_indices = [0, 0, 1]\n output_counts = [2, 1]\n\nExample 4:\n input_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]], \n [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]\n attribute_sorted = 1\n attribute_axis = 1\n\n intermediate data are presented below for better understanding: \n \n there are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):\n A: [[1, 1], [1, 1]], \n [[0, 1], [0, 1]], \n [[2, 1], [2, 1]], \n [[0, 1], [0, 1]].\n \n there are 3 unique subtensors: \n [[1, 1], [1, 1]], \n [[0, 1], [0, 1]], \n [[2, 1], [2, 1]].\n \n sorted unique subtensors:\n B: [[0, 1], [0, 1]], \n [[1, 1], [1, 1]], \n [[2, 1], [2, 1]].\n \n output_Y is constructed from B:\n [[[0. 1.], [1. 1.], [2. 1.]], \n [[0. 1.], [1. 1.], [2. 
1.]]]\n\n output_indices is to map from B to A:\n [1, 0, 2]\n \n output_inverse_indices is to map from A to B:\n [1, 0, 2, 0]\n\n output_counts = [2 1 1]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node_not_sorted = onnx.helper.make_node(\n 'Unique',\n inputs=['X'],\n outputs=['Y', 'indices', 'inverse_indices', 'counts'],\n sorted=0\n)\n# numpy unique does not retain original order (it sorts the output unique values)\n# https://github.com/numpy/numpy/issues/8621\n# we need to recover unsorted output and indices\nx = np.array([2.0, 1.0, 1.0, 3.0, 4.0, 3.0], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True)\n\n# prepare index mapping from sorted to unsorted\nargsorted_indices = np.argsort(indices)\ninverse_indices_map = {i: si for i, si in zip(argsorted_indices, np.arange(len(argsorted_indices)))}\n\nindices = indices[argsorted_indices]\ny = np.take(x, indices, axis=0)\ninverse_indices = np.asarray([inverse_indices_map[i] for i in inverse_indices], dtype=np.int64)\ncounts = counts[argsorted_indices]\n# print(y)\n# [2.0, 1.0, 3.0, 4.0]\n# print(indices)\n# [0 1 3 4]\n# print(inverse_indices)\n# [0, 1, 1, 2, 3, 2]\n# print(counts)\n# [1, 2, 2, 1]\n\nexpect(node_not_sorted, inputs=[x], outputs=[y, indices, inverse_indices, counts], name='test_unique_not_sorted_without_axis')", + "summary": "not_sorted_without_axis" + }, + { + "code": "node_sorted = onnx.helper.make_node(\n 'Unique',\n inputs=['X'],\n outputs=['Y', 'indices', 'inverse_indices', 'counts'],\n sorted=1,\n axis=0\n)\n\nx = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=0)\n# print(y)\n# [[1. 0. 0.]\n# [2. 3. 
4.]]\n# print(indices)\n# [0 2]\n# print(inverse_indices)\n# [0 0 1]\n# print(counts)\n# [2 1]\n\nexpect(node_sorted, inputs=[x], outputs=[y, indices, inverse_indices, counts], name='test_unique_sorted_with_axis')", + "summary": "sorted_with_axis" + }, + { + "code": "node_sorted = onnx.helper.make_node(\n 'Unique',\n inputs=['X'],\n outputs=['Y', 'indices', 'inverse_indices', 'counts'],\n sorted=1,\n axis=1\n)\n\nx = np.array([[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=1)\n# print(y)\n# [[[0. 1.]\n# [1. 1.]\n# [2. 1.]]\n# [[0. 1.]\n# [1. 1.]\n# [2. 1.]]]\n# print(indices)\n# [1 0 2]\n# print(inverse_indices)\n# [1 0 2 0]\n# print(counts)\n# [2 1 1]\nexpect(node_sorted, inputs=[x], outputs=[y, indices, inverse_indices, counts], name='test_unique_sorted_with_axis_3d')", + "summary": "sorted_with_axis_3d" + }, + { + "code": "node_sorted = onnx.helper.make_node(\n 'Unique',\n inputs=['X'],\n outputs=['Y', 'indices', 'inverse_indices', 'counts'],\n sorted=1,\n axis=-1\n)\n\nx = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 3]], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True, axis=-1)\n# print(y)\n# [[0. 1.]\n# [0. 1.]\n# [3. 
    2.]]\n# print(indices)\n# [1 0]\n# print(inverse_indices)\n# [1 0 0]\n# print(counts)\n# [2 1]\n\nexpect(node_sorted, inputs=[x], outputs=[y, indices, inverse_indices, counts], name='test_unique_sorted_with_negative_axis')", + "summary": "sorted_with_negative_axis" + }, + { + "code": "node_sorted = onnx.helper.make_node(\n 'Unique',\n inputs=['X'],\n outputs=['Y', 'indices', 'inverse_indices', 'counts']\n)\n\nx = np.array([2.0, 1.0, 1.0, 3.0, 4.0, 3.0], dtype=np.float32)\ny, indices, inverse_indices, counts = np.unique(x, True, True, True)\nexpect(node_sorted, inputs=[x], outputs=[y, indices, inverse_indices, counts], name='test_unique_sorted_without_axis')", + "summary": "sorted_without_axis" + } + ], + "inputs": [ + { + "description": "A N-D input tensor that is to be processed.", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 4, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "A tensor of the same type as 'X' containing all the unique values or subtensors sliced along a provided 'axis' in 'X', either sorted or maintained in the same order they occur in input 'X'", + "name": "Y", + "type": "T" + }, + { + "description": "A 1-D INT64 tensor containing indices of 'Y' elements' first occurrence in 'X'. When 'axis' is provided, it contains indices to subtensors in input 'X' on the 'axis'. When 'axis' is not provided, it contains indices to values in the flattened input tensor. ", + "name": "indices", + "option": "optional", + "type": "tensor(int64)" + }, + { + "description": "A 1-D INT64 tensor containing, for elements of 'X', its corresponding indices in 'Y'. When 'axis' is provided, it contains indices to subtensors in output 'Y' on the 'axis'. When 'axis' is not provided, it contains indices to values in output 'Y'. 
", + "name": "inverse_indices", + "option": "optional", + "type": "tensor(int64)" + }, + { + "description": "A 1-D INT64 tensor containing the count of each element of 'Y' in input 'X'", + "name": "counts", + "option": "optional", + "type": "tensor(int64)" + } + ], + "outputs_range": "1 - 4", + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Input can be of any tensor type.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Unsqueeze", + "schema": { + "attributes": [ + { + "description": "List of non-negative integers, indicate the dimensions to be inserted", + "name": "axes", + "required": true, + "type": "int64[]" + } + ], + "category": "Transform", + "description": "Insert single-dimensional entries to the shape of a tensor.\nTakes one required argument `axes`, a list of dimensions that will be inserted.\nDimension indices in `axes` are as seen in the output tensor. 
For example:\n Given a tensor such that tensor with shape [3, 4, 5], then\n Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[-2],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\ny = np.expand_dims(x, axis=-2)\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_negative_axes')", + "summary": "unsqueeze_negative_axes" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nfor i in range(x.ndim):\n node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[i],\n )\n y = np.expand_dims(x, axis=i)\n\n expect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_axis_' + str(i))", + "summary": "unsqueeze_one_axis" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[2, 4, 5],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_three_axes')", + "summary": "unsqueeze_three_axes" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[1, 4],\n)\ny = np.expand_dims(x, axis=1)\ny = np.expand_dims(y, axis=4)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_two_axes')", + "summary": "unsqueeze_two_axes" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[5, 4, 2],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_unsorted_axes')", + "summary": "unsqueeze_unsorted_axes" + } + ], + "inputs": [ + { + "description": "Original tensor", + 
"name": "data", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "expanded", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Unsqueeze", + "schema": { + "attributes": [ + { + "description": "List of integers indicating the dimensions to be inserted. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(expanded).", + "name": "axes", + "required": true, + "type": "int64[]" + } + ], + "category": "Transform", + "description": "Insert single-dimensional entries to the shape of an input tensor (`data`).\nTakes one required argument `axes` - which contains a list of dimension indices and this operator will insert a dimension of value `1` into the corresponding index of the output tensor (`expanded`).\n\nFor example:\n Given an input tensor (`data`) of shape [3, 4, 5], then\n Unsqueeze(data, axes=[0, 4]) outputs a tensor (`expanded`) containing same data as `data` but with shape [1, 3, 4, 5, 1].\n\nThe attribute `axes` should not contain any duplicate entries. It is an error if it contains duplicates.\nThe rank of the output tensor (`output_rank`) is the rank of the input tensor (`data`) plus the number of values in `axes`.\nEach value in `axes` should be within the (inclusive) range [-output_rank , output_rank - 1]. 
\nThe order of values in `axes` does not matter and can come in any order. \n\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[-2],\n)\nx = np.random.randn(1, 3, 1, 5).astype(np.float32)\ny = np.expand_dims(x, axis=-2)\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_negative_axes')", + "summary": "unsqueeze_negative_axes" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nfor i in range(x.ndim):\n node = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[i],\n )\n y = np.expand_dims(x, axis=i)\n\n expect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_axis_' + str(i))", + "summary": "unsqueeze_one_axis" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[2, 4, 5],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_three_axes')", + "summary": "unsqueeze_three_axes" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[1, 4],\n)\ny = np.expand_dims(x, axis=1)\ny = np.expand_dims(y, axis=4)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_two_axes')", + "summary": "unsqueeze_two_axes" + }, + { + "code": "x = np.random.randn(3, 4, 5).astype(np.float32)\n\nnode = onnx.helper.make_node(\n 'Unsqueeze',\n inputs=['x'],\n outputs=['y'],\n axes=[5, 4, 2],\n)\ny = np.expand_dims(x, axis=2)\ny = np.expand_dims(y, axis=4)\ny = np.expand_dims(y, axis=5)\n\nexpect(node, inputs=[x], outputs=[y],\n name='test_unsqueeze_unsorted_axes')", + "summary": "unsqueeze_unsorted_axes" + } + ], + "inputs": [ + { + "description": "Original tensor", + "name": "data", + "type": "T" + } + ], + "max_input": 1, + 
"max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "Reshaped tensor with same data as input.", + "name": "expanded", + "type": "T" + } + ], + "since_version": 11, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Upsample", + "schema": { + "attributes": [ + { + "description": "The scale along height dimension. It takes value greater than or equal to 1.", + "name": "height_scale", + "required": true, + "type": "float32" + }, + { + "default": "nearest", + "description": "Two interpolation modes: nearest(default), bilinear", + "name": "mode", + "required": false, + "type": "string" + }, + { + "description": "The scale along width dimension. 
It takes value greater than or equal to 1.", + "name": "width_scale", + "required": true, + "type": "float32" + } + ], + "category": "Data", + "description": "Upsample the input tensor.\nThe width and height of the output tensor are:\n output_width = floor(input_width * width_scale),\n output_height = floor(input_height * height_scale).\nExample:\n Given `data` tensor, width_scale, height_scale, mode,\n Upsample the input 4-D tensor in nearest mode:\n data = [[[\n [1, 2],\n [3, 4]\n ]]]\n width_scale = 2\n height_scale = 2\n mode = \"nearest\"\n output = [[[\n [1, 1, 2, 2],\n [1, 1, 2, 2],\n [3, 3, 4, 4],\n [3, 3, 4, 4]\n ]]]\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Upsample',\n inputs=['X', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array([[[\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n]]], dtype=np.float32)\n\nexpect(node, inputs=[data, scales], outputs=[output],\n name='test_upsample_nearest', opset_imports=[helper.make_opsetid(\"\", 9)])", + "summary": "nearest" + } + ], + "inputs": [ + { + "description": "4-D tensor, [N,C,H,W]", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "4-D tensor after resizing, [N,C,H,W]", + "name": "Y", + "type": "T" + } + ], + "since_version": 1, + "support_level": "experimental", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)" + ], + "description": "Constrain output types to bool, int32, int64, float16, float, double tensors.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Upsample", + "schema": { + "attributes": [ + { + "default": "nearest", + "description": "Two 
interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)", + "name": "mode", + "required": false, + "type": "string" + }, + { + "description": "The scale array along each dimension. It takes value greater than or equal to 1. The number of elements of 'scales' should be the same as the rank of input 'X'.", + "name": "scales", + "required": true, + "type": "float32[]" + } + ], + "category": "Data", + "description": "Upsample the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Upsample',\n inputs=['X', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array([[[\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n]]], dtype=np.float32)\n\nexpect(node, inputs=[data, scales], outputs=[output],\n name='test_upsample_nearest', opset_imports=[helper.make_opsetid(\"\", 9)])", + "summary": "nearest" + } + ], + "inputs": [ + { + "description": "N-D tensor", + "name": "X", + "type": "T" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "N-D tensor after resizing", + "name": "Y", + "type": "T" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Upsample", + "schema": 
{ + "attributes": [ + { + "default": "nearest", + "description": "Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)", + "name": "mode", + "required": false, + "type": "string" + } + ], + "category": "Data", + "description": "Upsample the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Upsample',\n inputs=['X', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array([[[\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n]]], dtype=np.float32)\n\nexpect(node, inputs=[data, scales], outputs=[output],\n name='test_upsample_nearest', opset_imports=[helper.make_opsetid(\"\", 9)])", + "summary": "nearest" + } + ], + "inputs": [ + { + "description": "N-D tensor", + "name": "X", + "type": "T" + }, + { + "description": "The scale array along each dimension. It takes value greater than or equal to 1. 
The number of elements of 'scales' should be the same as the rank of input 'X'.", + "name": "scales", + "type": "tensor(float)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "N-D tensor after resizing", + "name": "Y", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Upsample", + "schema": { + "attributes": [ + { + "default": "nearest", + "description": "Two interpolation modes: nearest (default), and linear (including bilinear, trilinear, etc)", + "name": "mode", + "required": false, + "type": "string" + } + ], + "category": "Data", + "description": "Upsample the input tensor.\nEach dimension value of the output tensor is:\n output_dimension = floor(input_dimension * scale).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Upsample',\n inputs=['X', 'scales'],\n outputs=['Y'],\n mode='nearest',\n)\n\ndata = np.array([[[\n [1, 2],\n [3, 4],\n]]], dtype=np.float32)\n\nscales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)\n\noutput = np.array([[[\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n]]], dtype=np.float32)\n\nexpect(node, inputs=[data, scales], outputs=[output],\n name='test_upsample_nearest', opset_imports=[helper.make_opsetid(\"\", 9)])", + "summary": "nearest" + } + ], + "inputs": [ + { + "description": "N-D tensor", + "name": "X", + "type": "T" + }, + { + "description": "The scale array along 
each dimension. It takes value greater than or equal to 1. The number of elements of 'scales' should be the same as the rank of input 'X'.", + "name": "scales", + "type": "tensor(float)" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "N-D tensor after resizing", + "name": "Y", + "type": "T" + } + ], + "since_version": 10, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input 'X' and output 'Y' to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Where", + "schema": { + "description": "Return elements, either from X or Y, depending on condition\n (with Numpy-style broadcasting support).\n Where behaves like numpy.where with three parameters:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Where',\n inputs=['condition', 'x', 'y'],\n outputs=['z'],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=np.bool)\nx = np.array([[1, 2], [3, 4]], dtype=np.int64)\ny = np.array([[9, 8], [7, 6]], dtype=np.int64)\nz = np.where(condition, x, y) # expected output [[1, 8], [3, 4]]\nexpect(node, inputs=[condition, x, y], outputs=[z],\n name='test_where_long_example')", + "summary": "long" + }, + { + "code": "node = onnx.helper.make_node(\n 'Where',\n inputs=['condition', 'x', 'y'],\n outputs=['z'],\n)\n\ncondition = np.array([[1, 0], [1, 1]], dtype=np.bool)\nx = np.array([[1, 2], [3, 4]], dtype=np.float32)\ny = np.array([[9, 8], [7, 6]], dtype=np.float32)\nz = np.where(condition, x, y) # expected output [[1, 8], [3, 
4]]\nexpect(node, inputs=[condition, x, y], outputs=[z],\n name='test_where_example')", + "summary": "where" + } + ], + "inputs": [ + { + "description": "When True (nonzero), yield X, otherwise yield Y", + "name": "condition", + "type": "B" + }, + { + "description": "values selected at indices where condition is True", + "name": "X", + "type": "T" + }, + { + "description": "values selected at indices where condition is False", + "name": "Y", + "type": "T" + } + ], + "max_input": 3, + "max_output": 1, + "min_input": 3, + "min_output": 1, + "outputs": [ + { + "description": "Tensor of shape equal to the broadcasted shape of condition, X, and Y.", + "name": "output", + "type": "T" + } + ], + "since_version": 9, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrain to boolean tensors.", + "type_param_str": "B" + }, + { + "allowed_type_strs": [ + "tensor(uint8)", + "tensor(uint16)", + "tensor(uint32)", + "tensor(uint64)", + "tensor(int8)", + "tensor(int16)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(string)", + "tensor(bool)", + "tensor(complex64)", + "tensor(complex128)" + ], + "description": "Constrain input and output types to all tensor types.", + "type_param_str": "T" + } + ] + } + }, + { + "name": "Xor", + "schema": { + "attributes": [ + { + "description": "If set, defines the broadcast dimensions.", + "name": "axis", + "required": false, + "type": "int64" + }, + { + "description": "Enable broadcasting", + "name": "broadcast", + "required": false, + "type": "int64" + } + ], + "category": "Logic", + "description": "Returns the tensor resulted from performing the `xor` logical operation\nelementwise on the input tensors `A` and `B`.\n\nIf broadcasting is enabled, the right-hand-side argument will be broadcasted\nto match the shape of left-hand-side argument. 
See the doc of `Add` for a\ndetailed description of the broadcasting rules.\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(np.bool)\ny = (np.random.randn(3, 4) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor2d')\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor3d')\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor4d')", + "summary": "xor" + }, + { + "code": "node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(5) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v1d')\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(4, 5) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v2d')\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v2d')\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v3d')\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n 
name='test_xor_bcast4v4d')", + "summary": "xor_broadcast" + } + ], + "inputs": [ + { + "description": "Left input tensor for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Right input tensor for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains input to boolean tensor.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": "Xor", + "schema": { + "category": "Logic", + "description": "Returns the tensor resulted from performing the `xor` logical operation\nelementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support).\n\nThis operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md).\n", + "domain": "ai.onnx", + "examples": [ + { + "code": "node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n)\n\n# 2d\nx = (np.random.randn(3, 4) > 0).astype(np.bool)\ny = (np.random.randn(3, 4) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor2d')\n\n# 3d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor3d')\n\n# 4d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor4d')", + "summary": 
"xor" + }, + { + "code": "node = onnx.helper.make_node(\n 'Xor',\n inputs=['x', 'y'],\n outputs=['xor'],\n)\n\n# 3d vs 1d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(5) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v1d')\n\n# 3d vs 2d\nx = (np.random.randn(3, 4, 5) > 0).astype(np.bool)\ny = (np.random.randn(4, 5) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast3v2d')\n\n# 4d vs 2d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v2d')\n\n# 4d vs 3d\nx = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool)\ny = (np.random.randn(4, 5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v3d')\n\n# 4d vs 4d\nx = (np.random.randn(1, 4, 1, 6) > 0).astype(np.bool)\ny = (np.random.randn(3, 1, 5, 6) > 0).astype(np.bool)\nz = np.logical_xor(x, y)\nexpect(node, inputs=[x, y], outputs=[z],\n name='test_xor_bcast4v4d')", + "summary": "xor_broadcast" + } + ], + "inputs": [ + { + "description": "First input operand for the logical operator.", + "name": "A", + "type": "T" + }, + { + "description": "Second input operand for the logical operator.", + "name": "B", + "type": "T" + } + ], + "max_input": 2, + "max_output": 1, + "min_input": 2, + "min_output": 1, + "outputs": [ + { + "description": "Result tensor.", + "name": "C", + "type": "T1" + } + ], + "since_version": 7, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains input to boolean tensor.", + "type_param_str": "T" + }, + { + "allowed_type_strs": [ + "tensor(bool)" + ], + "description": "Constrains output to boolean tensor.", + "type_param_str": "T1" + } + ] + } + }, + { + "name": 
"ZipMap", + "schema": { + "attributes": [ + { + "description": "The keys when using int keys.
    One and only one of the 'classlabels_*' attributes must be defined.", + "name": "classlabels_int64s", + "required": false, + "type": "int64[]" + }, + { + "description": "The keys when using string keys.
    One and only one of the 'classlabels_*' attributes must be defined.", + "name": "classlabels_strings", + "required": false, + "type": "string[]" + } + ], + "description": "Creates a map from the input and the attributes.
    \n The values are provided by the input tensor, while the keys are specified by the attributes.\n Must provide keys in either classlabels_strings or classlabels_int64s (but not both).
    \n The columns of the tensor correspond one-by-one to the keys specified by the attributes. There must be as many columns as keys.
    \n", + "domain": "ai.onnx.ml", + "inputs": [ + { + "description": "The input values", + "name": "X", + "type": "tensor(float)" + } + ], + "max_input": 1, + "max_output": 1, + "min_input": 1, + "min_output": 1, + "outputs": [ + { + "description": "The output map", + "name": "Z", + "type": "T" + } + ], + "since_version": 1, + "support_level": "common", + "type_constraints": [ + { + "allowed_type_strs": [ + "seq(map(string, float))", + "seq(map(int64, float))" + ], + "description": "The output will be a sequence of string or integer maps to float.", + "type_param_str": "T" + } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/onnx-proto.js b/frontend/packages/core/public/netron/onnx-proto.js new file mode 100644 index 00000000..8e2f8ffc --- /dev/null +++ b/frontend/packages/core/public/netron/onnx-proto.js @@ -0,0 +1,2231 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.onnx || ($protobuf.roots.onnx = {}); + + $root.onnx = (function() { + + var onnx = {}; + + onnx.Version = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "_START_VERSION"] = 0; + values[valuesById[1] = "IR_VERSION_2017_10_10"] = 1; + values[valuesById[2] = "IR_VERSION_2017_10_30"] = 2; + values[valuesById[3] = "IR_VERSION_2017_11_3"] = 3; + values[valuesById[4] = "IR_VERSION_2019_1_22"] = 4; + values[valuesById[5] = "IR_VERSION_2019_3_18"] = 5; + values[valuesById[6] = "IR_VERSION_2019_9_19"] = 6; + values[valuesById[7] = "IR_VERSION"] = 7; + return values; + })(); + + onnx.AttributeProto = (function() { + + function AttributeProto(properties) { + this.floats = []; + this.ints = []; + this.strings = []; + this.tensors = []; + this.graphs = []; + this.sparse_tensors = []; + if (properties) + for (var 
keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AttributeProto.prototype.name = ""; + AttributeProto.prototype.ref_attr_name = ""; + AttributeProto.prototype.doc_string = ""; + AttributeProto.prototype.type = 0; + AttributeProto.prototype.f = 0; + AttributeProto.prototype.i = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + AttributeProto.prototype.s = $util.newBuffer([]); + AttributeProto.prototype.t = null; + AttributeProto.prototype.g = null; + AttributeProto.prototype.sparse_tensor = null; + AttributeProto.prototype.floats = $util.emptyArray; + AttributeProto.prototype.ints = $util.emptyArray; + AttributeProto.prototype.strings = $util.emptyArray; + AttributeProto.prototype.tensors = $util.emptyArray; + AttributeProto.prototype.graphs = $util.emptyArray; + AttributeProto.prototype.sparse_tensors = $util.emptyArray; + + AttributeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.AttributeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 21: + message.ref_attr_name = reader.string(); + break; + case 13: + message.doc_string = reader.string(); + break; + case 20: + message.type = reader.int32(); + break; + case 2: + message.f = reader.float(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.s = reader.bytes(); + break; + case 5: + message.t = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + case 6: + message.g = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 22: + message.sparse_tensor = $root.onnx.SparseTensorProto.decode(reader, reader.uint32()); + break; + case 7: + if (!(message.floats && message.floats.length)) + message.floats = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.floats.push(reader.float()); + } else + message.floats.push(reader.float()); + break; + case 8: + if (!(message.ints && message.ints.length)) + message.ints = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.ints.push(reader.int64()); + } else + message.ints.push(reader.int64()); + break; + case 9: + if (!(message.strings && message.strings.length)) + message.strings = []; + message.strings.push(reader.bytes()); + break; + case 10: + if (!(message.tensors && message.tensors.length)) + message.tensors = []; + message.tensors.push($root.onnx.TensorProto.decode(reader, reader.uint32())); + break; + case 11: + if (!(message.graphs && message.graphs.length)) + message.graphs = []; + message.graphs.push($root.onnx.GraphProto.decode(reader, reader.uint32())); + break; + case 23: + if (!(message.sparse_tensors && message.sparse_tensors.length)) + message.sparse_tensors = []; + 
message.sparse_tensors.push($root.onnx.SparseTensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + AttributeProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.AttributeProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "ref_attr_name": + message.ref_attr_name = reader.string(); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "type": + message.type = reader.enum($root.onnx.AttributeProto.AttributeType); + break; + case "f": + message.f = reader.float(); + break; + case "i": + message.i = reader.int64(); + break; + case "s": + message.s = reader.bytes(); + break; + case "t": + message.t = $root.onnx.TensorProto.decodeText(reader, true); + break; + case "g": + message.g = $root.onnx.GraphProto.decodeText(reader, true); + break; + case "sparse_tensor": + message.sparse_tensor = $root.onnx.SparseTensorProto.decodeText(reader, true); + break; + case "floats": + if (!(message.floats && message.floats.length)) + message.floats = []; + if (reader.first()) + while (!reader.last()) { + message.floats.push(reader.float()); + reader.next(); + } + else + message.floats.push(reader.float()); + break; + case "ints": + if (!(message.ints && message.ints.length)) + message.ints = []; + if (reader.first()) + while (!reader.last()) { + message.ints.push(reader.int64()); + reader.next(); + } + else + message.ints.push(reader.int64()); + break; + case "strings": + if (!(message.strings && message.strings.length)) + message.strings = []; + if (reader.first()) + while (!reader.last()) { + message.strings.push(reader.bytes()); + reader.next(); + } + else + message.strings.push(reader.bytes()); + break; + case "tensors": + if (!(message.tensors && message.tensors.length)) + message.tensors = []; + 
message.tensors.push($root.onnx.TensorProto.decodeText(reader, true)); + break; + case "graphs": + if (!(message.graphs && message.graphs.length)) + message.graphs = []; + message.graphs.push($root.onnx.GraphProto.decodeText(reader, true)); + break; + case "sparse_tensors": + if (!(message.sparse_tensors && message.sparse_tensors.length)) + message.sparse_tensors = []; + message.sparse_tensors.push($root.onnx.SparseTensorProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + AttributeProto.AttributeType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNDEFINED"] = 0; + values[valuesById[1] = "FLOAT"] = 1; + values[valuesById[2] = "INT"] = 2; + values[valuesById[3] = "STRING"] = 3; + values[valuesById[4] = "TENSOR"] = 4; + values[valuesById[5] = "GRAPH"] = 5; + values[valuesById[11] = "SPARSE_TENSOR"] = 11; + values[valuesById[6] = "FLOATS"] = 6; + values[valuesById[7] = "INTS"] = 7; + values[valuesById[8] = "STRINGS"] = 8; + values[valuesById[9] = "TENSORS"] = 9; + values[valuesById[10] = "GRAPHS"] = 10; + values[valuesById[12] = "SPARSE_TENSORS"] = 12; + return values; + })(); + + return AttributeProto; + })(); + + onnx.ValueInfoProto = (function() { + + function ValueInfoProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ValueInfoProto.prototype.name = ""; + ValueInfoProto.prototype.type = null; + ValueInfoProto.prototype.doc_string = ""; + + ValueInfoProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.ValueInfoProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + case 3: + message.doc_string = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ValueInfoProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.ValueInfoProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = $root.onnx.TypeProto.decodeText(reader, true); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ValueInfoProto; + })(); + + onnx.NodeProto = (function() { + + function NodeProto(properties) { + this.input = []; + this.output = []; + this.attribute = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NodeProto.prototype.input = $util.emptyArray; + NodeProto.prototype.output = $util.emptyArray; + NodeProto.prototype.name = ""; + NodeProto.prototype.op_type = ""; + NodeProto.prototype.domain = ""; + NodeProto.prototype.attribute = $util.emptyArray; + NodeProto.prototype.doc_string = ""; + + NodeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.NodeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + case 2: + if (!(message.output && message.output.length)) + message.output = []; + message.output.push(reader.string()); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.op_type = reader.string(); + break; + case 7: + message.domain = reader.string(); + break; + case 5: + if (!(message.attribute && message.attribute.length)) + message.attribute = []; + message.attribute.push($root.onnx.AttributeProto.decode(reader, reader.uint32())); + break; + case 6: + message.doc_string = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NodeProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.NodeProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "input": + if (!(message.input && message.input.length)) + message.input = []; + if (reader.first()) + while (!reader.last()) { + message.input.push(reader.string()); + reader.next(); + } + else + message.input.push(reader.string()); + break; + case "output": + if (!(message.output && message.output.length)) + message.output = []; + if (reader.first()) + while (!reader.last()) { + message.output.push(reader.string()); + reader.next(); + } + else + message.output.push(reader.string()); + break; + case "name": + message.name = reader.string(); + break; + case "op_type": + message.op_type = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + case "attribute": + if (!(message.attribute && message.attribute.length)) + message.attribute = []; + message.attribute.push($root.onnx.AttributeProto.decodeText(reader, true)); + break; + case "doc_string": + 
message.doc_string = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NodeProto; + })(); + + onnx.TrainingInfoProto = (function() { + + function TrainingInfoProto(properties) { + this.initialization_binding = []; + this.update_binding = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TrainingInfoProto.prototype.initialization = null; + TrainingInfoProto.prototype.algorithm = null; + TrainingInfoProto.prototype.initialization_binding = $util.emptyArray; + TrainingInfoProto.prototype.update_binding = $util.emptyArray; + + TrainingInfoProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TrainingInfoProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.initialization = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 2: + message.algorithm = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 3: + if (!(message.initialization_binding && message.initialization_binding.length)) + message.initialization_binding = []; + message.initialization_binding.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + case 4: + if (!(message.update_binding && message.update_binding.length)) + message.update_binding = []; + message.update_binding.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TrainingInfoProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.TrainingInfoProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case 
"initialization": + message.initialization = $root.onnx.GraphProto.decodeText(reader, true); + break; + case "algorithm": + message.algorithm = $root.onnx.GraphProto.decodeText(reader, true); + break; + case "initialization_binding": + if (!(message.initialization_binding && message.initialization_binding.length)) + message.initialization_binding = []; + message.initialization_binding.push($root.onnx.StringStringEntryProto.decodeText(reader, true)); + break; + case "update_binding": + if (!(message.update_binding && message.update_binding.length)) + message.update_binding = []; + message.update_binding.push($root.onnx.StringStringEntryProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TrainingInfoProto; + })(); + + onnx.ModelProto = (function() { + + function ModelProto(properties) { + this.opset_import = []; + this.metadata_props = []; + this.training_info = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ModelProto.prototype.ir_version = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ModelProto.prototype.opset_import = $util.emptyArray; + ModelProto.prototype.producer_name = ""; + ModelProto.prototype.producer_version = ""; + ModelProto.prototype.domain = ""; + ModelProto.prototype.model_version = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ModelProto.prototype.doc_string = ""; + ModelProto.prototype.graph = null; + ModelProto.prototype.metadata_props = $util.emptyArray; + ModelProto.prototype.training_info = $util.emptyArray; + + ModelProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.ModelProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.ir_version = reader.int64(); + break; + case 8: + if (!(message.opset_import && message.opset_import.length)) + message.opset_import = []; + message.opset_import.push($root.onnx.OperatorSetIdProto.decode(reader, reader.uint32())); + break; + case 2: + message.producer_name = reader.string(); + break; + case 3: + message.producer_version = reader.string(); + break; + case 4: + message.domain = reader.string(); + break; + case 5: + message.model_version = reader.int64(); + break; + case 6: + message.doc_string = reader.string(); + break; + case 7: + message.graph = $root.onnx.GraphProto.decode(reader, reader.uint32()); + break; + case 14: + if (!(message.metadata_props && message.metadata_props.length)) + message.metadata_props = []; + message.metadata_props.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + case 20: + if (!(message.training_info && message.training_info.length)) + message.training_info = []; + message.training_info.push($root.onnx.TrainingInfoProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ModelProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.ModelProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "ir_version": + message.ir_version = reader.int64(); + break; + case "opset_import": + if (!(message.opset_import && message.opset_import.length)) + message.opset_import = []; + message.opset_import.push($root.onnx.OperatorSetIdProto.decodeText(reader, true)); + break; + case "producer_name": + message.producer_name = reader.string(); + break; + case "producer_version": + message.producer_version = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + 
case "model_version": + message.model_version = reader.int64(); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "graph": + message.graph = $root.onnx.GraphProto.decodeText(reader, true); + break; + case "metadata_props": + if (!(message.metadata_props && message.metadata_props.length)) + message.metadata_props = []; + message.metadata_props.push($root.onnx.StringStringEntryProto.decodeText(reader, true)); + break; + case "training_info": + if (!(message.training_info && message.training_info.length)) + message.training_info = []; + message.training_info.push($root.onnx.TrainingInfoProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ModelProto; + })(); + + onnx.StringStringEntryProto = (function() { + + function StringStringEntryProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StringStringEntryProto.prototype.key = ""; + StringStringEntryProto.prototype.value = ""; + + StringStringEntryProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.StringStringEntryProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + StringStringEntryProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.StringStringEntryProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return StringStringEntryProto; + })(); + + onnx.TensorAnnotation = (function() { + + function TensorAnnotation(properties) { + this.quant_parameter_tensor_names = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorAnnotation.prototype.tensor_name = ""; + TensorAnnotation.prototype.quant_parameter_tensor_names = $util.emptyArray; + + TensorAnnotation.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TensorAnnotation(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor_name = reader.string(); + break; + case 2: + if (!(message.quant_parameter_tensor_names && message.quant_parameter_tensor_names.length)) + message.quant_parameter_tensor_names = []; + message.quant_parameter_tensor_names.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorAnnotation.decodeText = function decodeText(reader) { + var message = new $root.onnx.TensorAnnotation(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "tensor_name": + message.tensor_name = reader.string(); + break; + case "quant_parameter_tensor_names": + if (!(message.quant_parameter_tensor_names && message.quant_parameter_tensor_names.length)) + message.quant_parameter_tensor_names = []; + message.quant_parameter_tensor_names.push($root.onnx.StringStringEntryProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TensorAnnotation; + })(); + + onnx.GraphProto = (function() { + + function GraphProto(properties) { + this.node = []; + this.initializer = []; + this.sparse_initializer = []; + this.input = []; + this.output = []; + this.value_info = []; + this.quantization_annotation = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GraphProto.prototype.node = $util.emptyArray; + GraphProto.prototype.name = ""; + GraphProto.prototype.initializer = $util.emptyArray; + GraphProto.prototype.sparse_initializer = $util.emptyArray; + GraphProto.prototype.doc_string = ""; + GraphProto.prototype.input = $util.emptyArray; + GraphProto.prototype.output = 
$util.emptyArray; + GraphProto.prototype.value_info = $util.emptyArray; + GraphProto.prototype.quantization_annotation = $util.emptyArray; + + GraphProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.GraphProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.onnx.NodeProto.decode(reader, reader.uint32())); + break; + case 2: + message.name = reader.string(); + break; + case 5: + if (!(message.initializer && message.initializer.length)) + message.initializer = []; + message.initializer.push($root.onnx.TensorProto.decode(reader, reader.uint32())); + break; + case 15: + if (!(message.sparse_initializer && message.sparse_initializer.length)) + message.sparse_initializer = []; + message.sparse_initializer.push($root.onnx.SparseTensorProto.decode(reader, reader.uint32())); + break; + case 10: + message.doc_string = reader.string(); + break; + case 11: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + case 12: + if (!(message.output && message.output.length)) + message.output = []; + message.output.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + case 13: + if (!(message.value_info && message.value_info.length)) + message.value_info = []; + message.value_info.push($root.onnx.ValueInfoProto.decode(reader, reader.uint32())); + break; + case 14: + if (!(message.quantization_annotation && message.quantization_annotation.length)) + message.quantization_annotation = []; + message.quantization_annotation.push($root.onnx.TensorAnnotation.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return 
message; + }; + + GraphProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.GraphProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "node": + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.onnx.NodeProto.decodeText(reader, true)); + break; + case "name": + message.name = reader.string(); + break; + case "initializer": + if (!(message.initializer && message.initializer.length)) + message.initializer = []; + message.initializer.push($root.onnx.TensorProto.decodeText(reader, true)); + break; + case "sparse_initializer": + if (!(message.sparse_initializer && message.sparse_initializer.length)) + message.sparse_initializer = []; + message.sparse_initializer.push($root.onnx.SparseTensorProto.decodeText(reader, true)); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "input": + if (!(message.input && message.input.length)) + message.input = []; + message.input.push($root.onnx.ValueInfoProto.decodeText(reader, true)); + break; + case "output": + if (!(message.output && message.output.length)) + message.output = []; + message.output.push($root.onnx.ValueInfoProto.decodeText(reader, true)); + break; + case "value_info": + if (!(message.value_info && message.value_info.length)) + message.value_info = []; + message.value_info.push($root.onnx.ValueInfoProto.decodeText(reader, true)); + break; + case "quantization_annotation": + if (!(message.quantization_annotation && message.quantization_annotation.length)) + message.quantization_annotation = []; + message.quantization_annotation.push($root.onnx.TensorAnnotation.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return GraphProto; + })(); + + onnx.TensorProto = (function() { + + function TensorProto(properties) { + this.dims = []; + this.float_data = []; + this.int32_data = []; + this.string_data = 
[]; + this.int64_data = []; + this.external_data = []; + this.double_data = []; + this.uint64_data = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorProto.prototype.dims = $util.emptyArray; + TensorProto.prototype.data_type = 0; + TensorProto.prototype.segment = null; + TensorProto.prototype.float_data = $util.emptyArray; + TensorProto.prototype.int32_data = $util.emptyArray; + TensorProto.prototype.string_data = $util.emptyArray; + TensorProto.prototype.int64_data = $util.emptyArray; + TensorProto.prototype.name = ""; + TensorProto.prototype.doc_string = ""; + TensorProto.prototype.raw_data = $util.newBuffer([]); + TensorProto.prototype.external_data = $util.emptyArray; + TensorProto.prototype.data_location = 0; + TensorProto.prototype.double_data = $util.emptyArray; + TensorProto.prototype.uint64_data = $util.emptyArray; + + TensorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TensorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.dims && message.dims.length)) + message.dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dims.push(reader.int64()); + } else + message.dims.push(reader.int64()); + break; + case 2: + message.data_type = reader.int32(); + break; + case 3: + message.segment = $root.onnx.TensorProto.Segment.decode(reader, reader.uint32()); + break; + case 4: + if (!(message.float_data && message.float_data.length)) + message.float_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + if (message.float_data.length == 0 && (end2 - reader.pos) > 1048576) { + var float_dataLength = end2 - reader.pos; + var float_dataView = new DataView(reader.buf.buffer, reader.buf.byteOffset + reader.pos, float_dataLength); + float_dataLength = float_dataLength >>> 2; + var float_data = new Float32Array(float_dataLength); + for (var i = 0; i < float_dataLength; i++) { + float_data[i] = float_dataView.getFloat32(i << 2, true); + } + message.float_data = float_data; + reader.pos = end2; + } + else { + while (reader.pos < end2) + message.float_data.push(reader.float()); + } + } else + message.float_data.push(reader.float()); + break; + case 5: + if (!(message.int32_data && message.int32_data.length)) + message.int32_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int32_data.push(reader.int32()); + } else + message.int32_data.push(reader.int32()); + break; + case 6: + if (!(message.string_data && message.string_data.length)) + message.string_data = []; + message.string_data.push(reader.bytes()); + break; + case 7: + if (!(message.int64_data && message.int64_data.length)) + message.int64_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + 
while (reader.pos < end2) + message.int64_data.push(reader.int64()); + } else + message.int64_data.push(reader.int64()); + break; + case 8: + message.name = reader.string(); + break; + case 12: + message.doc_string = reader.string(); + break; + case 9: + message.raw_data = reader.bytes(); + break; + case 13: + if (!(message.external_data && message.external_data.length)) + message.external_data = []; + message.external_data.push($root.onnx.StringStringEntryProto.decode(reader, reader.uint32())); + break; + case 14: + message.data_location = reader.int32(); + break; + case 10: + if (!(message.double_data && message.double_data.length)) + message.double_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + if (message.double_data.length == 0 && (end2 - reader.pos) > 1048576) { + var double_dataLength = end2 - reader.pos; + var double_dataView = new DataView(reader.buf.buffer, reader.buf.byteOffset + reader.pos, double_dataLength); + double_dataLength = double_dataLength >>> 3; + var double_data = new Float64Array(double_dataLength); + for (var i = 0; i < double_dataLength; i++) { + double_data[i] = double_dataView.getFloat64(i << 3, true); + } + message.double_data = double_data; + reader.pos = end2; + } + else { + while (reader.pos < end2) + message.double_data.push(reader.double()); + } + } else + message.double_data.push(reader.double()); + break; + case 11: + if (!(message.uint64_data && message.uint64_data.length)) + message.uint64_data = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.uint64_data.push(reader.uint64()); + } else + message.uint64_data.push(reader.uint64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.TensorProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dims": + if 
(!(message.dims && message.dims.length)) + message.dims = []; + if (reader.first()) + while (!reader.last()) { + message.dims.push(reader.int64()); + reader.next(); + } + else + message.dims.push(reader.int64()); + break; + case "data_type": + message.data_type = reader.int32(); + break; + case "segment": + message.segment = $root.onnx.TensorProto.Segment.decodeText(reader, true); + break; + case "float_data": + if (!(message.float_data && message.float_data.length)) + message.float_data = []; + if (reader.first()) + while (!reader.last()) { + message.float_data.push(reader.float()); + reader.next(); + } + else + message.float_data.push(reader.float()); + break; + case "int32_data": + if (!(message.int32_data && message.int32_data.length)) + message.int32_data = []; + if (reader.first()) + while (!reader.last()) { + message.int32_data.push(reader.int32()); + reader.next(); + } + else + message.int32_data.push(reader.int32()); + break; + case "string_data": + if (!(message.string_data && message.string_data.length)) + message.string_data = []; + if (reader.first()) + while (!reader.last()) { + message.string_data.push(reader.bytes()); + reader.next(); + } + else + message.string_data.push(reader.bytes()); + break; + case "int64_data": + if (!(message.int64_data && message.int64_data.length)) + message.int64_data = []; + if (reader.first()) + while (!reader.last()) { + message.int64_data.push(reader.int64()); + reader.next(); + } + else + message.int64_data.push(reader.int64()); + break; + case "name": + message.name = reader.string(); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "raw_data": + message.raw_data = reader.bytes(); + break; + case "external_data": + if (!(message.external_data && message.external_data.length)) + message.external_data = []; + message.external_data.push($root.onnx.StringStringEntryProto.decodeText(reader, true)); + break; + case "data_location": + message.data_location = 
reader.enum($root.onnx.TensorProto.DataLocation); + break; + case "double_data": + if (!(message.double_data && message.double_data.length)) + message.double_data = []; + if (reader.first()) + while (!reader.last()) { + message.double_data.push(reader.double()); + reader.next(); + } + else + message.double_data.push(reader.double()); + break; + case "uint64_data": + if (!(message.uint64_data && message.uint64_data.length)) + message.uint64_data = []; + if (reader.first()) + while (!reader.last()) { + message.uint64_data.push(reader.uint64()); + reader.next(); + } + else + message.uint64_data.push(reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TensorProto.DataType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNDEFINED"] = 0; + values[valuesById[1] = "FLOAT"] = 1; + values[valuesById[2] = "UINT8"] = 2; + values[valuesById[3] = "INT8"] = 3; + values[valuesById[4] = "UINT16"] = 4; + values[valuesById[5] = "INT16"] = 5; + values[valuesById[6] = "INT32"] = 6; + values[valuesById[7] = "INT64"] = 7; + values[valuesById[8] = "STRING"] = 8; + values[valuesById[9] = "BOOL"] = 9; + values[valuesById[10] = "FLOAT16"] = 10; + values[valuesById[11] = "DOUBLE"] = 11; + values[valuesById[12] = "UINT32"] = 12; + values[valuesById[13] = "UINT64"] = 13; + values[valuesById[14] = "COMPLEX64"] = 14; + values[valuesById[15] = "COMPLEX128"] = 15; + values[valuesById[16] = "BFLOAT16"] = 16; + return values; + })(); + + TensorProto.Segment = (function() { + + function Segment(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Segment.prototype.begin = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Segment.prototype.end = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + + Segment.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TensorProto.Segment(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.begin = reader.int64(); + break; + case 2: + message.end = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Segment.decodeText = function decodeText(reader) { + var message = new $root.onnx.TensorProto.Segment(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "begin": + message.begin = reader.int64(); + break; + case "end": + message.end = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Segment; + })(); + + TensorProto.DataLocation = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "EXTERNAL"] = 1; + return values; + })(); + + return TensorProto; + })(); + + onnx.SparseTensorProto = (function() { + + function SparseTensorProto(properties) { + this.dims = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SparseTensorProto.prototype.values = null; + SparseTensorProto.prototype.indices = null; + SparseTensorProto.prototype.dims = $util.emptyArray; + + SparseTensorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.SparseTensorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + case 2: + message.indices = $root.onnx.TensorProto.decode(reader, reader.uint32()); + break; + case 3: + if (!(message.dims && message.dims.length)) + message.dims = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dims.push(reader.int64()); + } else + message.dims.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SparseTensorProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.SparseTensorProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "values": + message.values = $root.onnx.TensorProto.decodeText(reader, true); + break; + case "indices": + message.indices = $root.onnx.TensorProto.decodeText(reader, true); + break; + case "dims": + if (!(message.dims && message.dims.length)) + message.dims = []; + if (reader.first()) + while (!reader.last()) { + message.dims.push(reader.int64()); + reader.next(); + } + else + message.dims.push(reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SparseTensorProto; + })(); + + onnx.TensorShapeProto = (function() { + + function TensorShapeProto(properties) { + this.dim = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorShapeProto.prototype.dim = $util.emptyArray; + + TensorShapeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TensorShapeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.dim && message.dim.length)) + message.dim = []; + message.dim.push($root.onnx.TensorShapeProto.Dimension.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorShapeProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.TensorShapeProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dim": + if (!(message.dim && message.dim.length)) + message.dim = []; + message.dim.push($root.onnx.TensorShapeProto.Dimension.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TensorShapeProto.Dimension = (function() { + + function Dimension(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Dimension.prototype.dim_value = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Dimension.prototype.dim_param = ""; + Dimension.prototype.denotation = ""; + + var $oneOfFields; + + Object.defineProperty(Dimension.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["dim_value", "dim_param"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Dimension.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TensorShapeProto.Dimension(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dim_value = reader.int64(); + break; + case 2: + message.dim_param = reader.string(); + break; + case 3: + message.denotation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Dimension.decodeText = function decodeText(reader) { + var message = new $root.onnx.TensorShapeProto.Dimension(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dim_value": + message.dim_value = reader.int64(); + break; + case "dim_param": + message.dim_param = reader.string(); + break; + case "denotation": + message.denotation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Dimension; + })(); + + return TensorShapeProto; + })(); + + onnx.TypeProto = (function() { + + function TypeProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TypeProto.prototype.tensor_type = null; + TypeProto.prototype.sequence_type = null; + TypeProto.prototype.map_type = null; + TypeProto.prototype.sparse_tensor_type = null; + TypeProto.prototype.opaque_type = null; + TypeProto.prototype.denotation = ""; + + var $oneOfFields; + + Object.defineProperty(TypeProto.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["tensor_type", "sequence_type", "map_type", "sparse_tensor_type", "opaque_type"]), + set: $util.oneOfSetter($oneOfFields) + }); + + TypeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TypeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor_type = $root.onnx.TypeProto.Tensor.decode(reader, reader.uint32()); + break; + case 4: + message.sequence_type = $root.onnx.TypeProto.Sequence.decode(reader, reader.uint32()); + break; + case 5: + message.map_type = $root.onnx.TypeProto.Map.decode(reader, reader.uint32()); + break; + case 8: + message.sparse_tensor_type = $root.onnx.TypeProto.SparseTensor.decode(reader, reader.uint32()); + break; + case 7: + message.opaque_type = $root.onnx.TypeProto.Opaque.decode(reader, reader.uint32()); + break; + case 6: + message.denotation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TypeProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.TypeProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "tensor_type": + message.tensor_type = $root.onnx.TypeProto.Tensor.decodeText(reader, true); + break; + case "sequence_type": + message.sequence_type = $root.onnx.TypeProto.Sequence.decodeText(reader, true); + break; + case "map_type": + message.map_type = $root.onnx.TypeProto.Map.decodeText(reader, true); + break; + case "sparse_tensor_type": + message.sparse_tensor_type = $root.onnx.TypeProto.SparseTensor.decodeText(reader, true); + break; + case "opaque_type": + message.opaque_type = $root.onnx.TypeProto.Opaque.decodeText(reader, true); + break; + case "denotation": + message.denotation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TypeProto.Tensor = (function() { + + function Tensor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Tensor.prototype.elem_type = 0; + 
Tensor.prototype.shape = null; + + Tensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Tensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.elem_type = reader.int32(); + break; + case 2: + message.shape = $root.onnx.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Tensor.decodeText = function decodeText(reader) { + var message = new $root.onnx.TypeProto.Tensor(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "elem_type": + message.elem_type = reader.int32(); + break; + case "shape": + message.shape = $root.onnx.TensorShapeProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Tensor; + })(); + + TypeProto.Sequence = (function() { + + function Sequence(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Sequence.prototype.elem_type = null; + + Sequence.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Sequence(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.elem_type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Sequence.decodeText = function decodeText(reader) { + var message = new $root.onnx.TypeProto.Sequence(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "elem_type": + message.elem_type = $root.onnx.TypeProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Sequence; + })(); + + TypeProto.Map = (function() { + + function Map(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Map.prototype.key_type = 0; + Map.prototype.value_type = null; + + Map.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Map(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key_type = reader.int32(); + break; + case 2: + message.value_type = $root.onnx.TypeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Map.decodeText = function decodeText(reader) { + var message = new $root.onnx.TypeProto.Map(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "key_type": + message.key_type = reader.int32(); + break; + case "value_type": + message.value_type = $root.onnx.TypeProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Map; + })(); + + TypeProto.SparseTensor = (function() { + + function SparseTensor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SparseTensor.prototype.elem_type = 0; + SparseTensor.prototype.shape = null; + + SparseTensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TypeProto.SparseTensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.elem_type = reader.int32(); + break; + case 2: + message.shape = $root.onnx.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SparseTensor.decodeText = function decodeText(reader) { + var message = new $root.onnx.TypeProto.SparseTensor(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "elem_type": + message.elem_type = reader.int32(); + break; + case "shape": + message.shape = $root.onnx.TensorShapeProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SparseTensor; + })(); + + TypeProto.Opaque = (function() { + + function Opaque(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Opaque.prototype.domain = ""; + Opaque.prototype.name = ""; + + Opaque.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.TypeProto.Opaque(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.domain = reader.string(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Opaque.decodeText = function decodeText(reader) { + var message = new $root.onnx.TypeProto.Opaque(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "domain": + message.domain = reader.string(); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Opaque; + })(); + + return TypeProto; + })(); + + onnx.OperatorSetIdProto = (function() { + + function OperatorSetIdProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OperatorSetIdProto.prototype.domain = ""; + OperatorSetIdProto.prototype.version = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + OperatorSetIdProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.onnx.OperatorSetIdProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.domain = reader.string(); + break; + case 2: + message.version = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OperatorSetIdProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.OperatorSetIdProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "domain": + message.domain = reader.string(); + break; + case "version": + message.version = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return OperatorSetIdProto; + })(); + + onnx.OperatorStatus = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "EXPERIMENTAL"] = 0; + values[valuesById[1] = "STABLE"] = 1; + return values; + })(); + + onnx.FunctionProto = (function() { + + function FunctionProto(properties) { + this.input = []; + this.output = []; + this.attribute = []; + this.node = []; + this.opset_import = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FunctionProto.prototype.name = ""; + FunctionProto.prototype.since_version = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + FunctionProto.prototype.status = 0; + FunctionProto.prototype.input = $util.emptyArray; + FunctionProto.prototype.output = $util.emptyArray; + FunctionProto.prototype.attribute = $util.emptyArray; + FunctionProto.prototype.node = $util.emptyArray; + FunctionProto.prototype.doc_string = ""; + FunctionProto.prototype.opset_import = $util.emptyArray; + + FunctionProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.FunctionProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.since_version = reader.int64(); + break; + case 3: + message.status = reader.int32(); + break; + case 4: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + case 5: + if (!(message.output && message.output.length)) + message.output = []; + message.output.push(reader.string()); + break; + case 6: + if (!(message.attribute && message.attribute.length)) + message.attribute = []; + message.attribute.push(reader.string()); + break; + case 7: + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.onnx.NodeProto.decode(reader, reader.uint32())); + break; + case 8: + message.doc_string = reader.string(); + break; + case 9: + if (!(message.opset_import && message.opset_import.length)) + message.opset_import = []; + message.opset_import.push($root.onnx.OperatorSetIdProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FunctionProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.FunctionProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + 
message.name = reader.string(); + break; + case "since_version": + message.since_version = reader.int64(); + break; + case "status": + message.status = reader.enum($root.onnx.OperatorStatus); + break; + case "input": + if (!(message.input && message.input.length)) + message.input = []; + if (reader.first()) + while (!reader.last()) { + message.input.push(reader.string()); + reader.next(); + } + else + message.input.push(reader.string()); + break; + case "output": + if (!(message.output && message.output.length)) + message.output = []; + if (reader.first()) + while (!reader.last()) { + message.output.push(reader.string()); + reader.next(); + } + else + message.output.push(reader.string()); + break; + case "attribute": + if (!(message.attribute && message.attribute.length)) + message.attribute = []; + if (reader.first()) + while (!reader.last()) { + message.attribute.push(reader.string()); + reader.next(); + } + else + message.attribute.push(reader.string()); + break; + case "node": + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.onnx.NodeProto.decodeText(reader, true)); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "opset_import": + if (!(message.opset_import && message.opset_import.length)) + message.opset_import = []; + message.opset_import.push($root.onnx.OperatorSetIdProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return FunctionProto; + })(); + + onnx.OperatorProto = (function() { + + function OperatorProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OperatorProto.prototype.op_type = ""; + OperatorProto.prototype.since_version = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + OperatorProto.prototype.status = 0; + OperatorProto.prototype.doc_string = ""; + + OperatorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.OperatorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.op_type = reader.string(); + break; + case 2: + message.since_version = reader.int64(); + break; + case 3: + message.status = reader.int32(); + break; + case 10: + message.doc_string = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OperatorProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.OperatorProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "op_type": + message.op_type = reader.string(); + break; + case "since_version": + message.since_version = reader.int64(); + break; + case "status": + message.status = reader.enum($root.onnx.OperatorStatus); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return OperatorProto; + })(); + + onnx.OperatorSetProto = (function() { + + function OperatorSetProto(properties) { + this.operator = []; + this.functions = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OperatorSetProto.prototype.magic = ""; + OperatorSetProto.prototype.ir_version = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + OperatorSetProto.prototype.ir_version_prerelease = ""; + OperatorSetProto.prototype.ir_build_metadata = ""; + OperatorSetProto.prototype.domain = ""; + OperatorSetProto.prototype.opset_version = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + OperatorSetProto.prototype.doc_string = ""; + OperatorSetProto.prototype.operator = $util.emptyArray; + OperatorSetProto.prototype.functions = $util.emptyArray; + + OperatorSetProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.onnx.OperatorSetProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.magic = reader.string(); + break; + case 2: + message.ir_version = reader.int64(); + break; + case 3: + message.ir_version_prerelease = reader.string(); + break; + case 7: + message.ir_build_metadata = reader.string(); + break; + case 4: + message.domain = reader.string(); + break; + case 5: + message.opset_version = reader.int64(); + break; + case 6: + message.doc_string = reader.string(); + break; + case 8: + if (!(message.operator && message.operator.length)) + message.operator = []; + message.operator.push($root.onnx.OperatorProto.decode(reader, reader.uint32())); + break; + case 9: + if (!(message.functions && message.functions.length)) + message.functions = []; + message.functions.push($root.onnx.FunctionProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OperatorSetProto.decodeText = function decodeText(reader) { + var message = new $root.onnx.OperatorSetProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "magic": + message.magic = reader.string(); + break; + case "ir_version": + message.ir_version = reader.int64(); + break; + case "ir_version_prerelease": + message.ir_version_prerelease = reader.string(); + break; + case "ir_build_metadata": + message.ir_build_metadata = reader.string(); + break; + case "domain": + message.domain = reader.string(); + break; + case "opset_version": + 
message.opset_version = reader.int64(); + break; + case "doc_string": + message.doc_string = reader.string(); + break; + case "operator": + if (!(message.operator && message.operator.length)) + message.operator = []; + message.operator.push($root.onnx.OperatorProto.decodeText(reader, true)); + break; + case "functions": + if (!(message.functions && message.functions.length)) + message.functions = []; + message.functions.push($root.onnx.FunctionProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return OperatorSetProto; + })(); + + return onnx; + })(); + + return $root; +})(protobuf); diff --git a/frontend/packages/core/public/netron/onnx.js b/frontend/packages/core/public/netron/onnx.js new file mode 100644 index 00000000..c53daf54 --- /dev/null +++ b/frontend/packages/core/public/netron/onnx.js @@ -0,0 +1,1167 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var onnx = onnx || {}; +var base = base || require('./base'); +var long = long || { Long: require('long') }; +var protobuf = protobuf || require('protobufjs'); +var prototxt = prototxt || require('protobufjs/ext/prototxt'); + +onnx.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension == 'onnx') { + return true; + } + if (extension == 'pb') { + if (identifier.endsWith('saved_model.pb')) { + return false; + } + if (identifier.endsWith('predict_net.pb') || identifier.endsWith('init_net.pb')) { + return false; + } + const tags = context.tags('pb'); + if (tags.size === 0) { + return false; + } + // ignore input_0.pb, output_0.pb + if (tags.size > 0 && + tags.has(1) && tags.get(1) === 0 && + tags.has(2) && tags.get(2) === 0 && + tags.has(9) && tags.get(9) === 2) { + return false; + } + if (tags.size > 0 && + Array.from(tags.values()).some((v) => v === 5)) { + return false; + } + // check 
ir_version and graph present + if (tags.has(1) && tags.get(1) != 0 || + tags.has(2) && tags.get(2) != 2 || + tags.has(3) && tags.get(3) != 2 || + tags.has(4) && tags.get(4) != 2 || + tags.has(5) && tags.get(5) != 0 || + tags.has(6) && tags.get(6) != 2 || + tags.has(8) && tags.get(8) != 2 || + tags.has(14) && tags.get(14) != 2 || + (!tags.has(7) || tags.get(7) != 2)) { + return false; + } + return true; + } + if (extension == 'pbtxt' || extension == 'prototxt') { + if (identifier.endsWith('predict_net.pbtxt') || identifier.endsWith('predict_net.prototxt') || + identifier.endsWith('init_net.pbtxt') || identifier.endsWith('init_net.prototxt')) { + return false; + } + const tags = context.tags('pbtxt'); + if (tags.has('ir_version') || tags.has('graph')) { + return true; + } + } + return false; + } + + open(context, host) { + return host.require('./onnx-proto').then(() => { + let model = null; + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension == 'pbtxt' || extension == 'prototxt') { + try { + onnx.proto = protobuf.roots.onnx.onnx; + const reader = prototxt.TextReader.create(context.text); + model = onnx.proto.ModelProto.decodeText(reader); + } + catch (error) { + throw new onnx.Error("File text format is not onnx.ModelProto (" + error.message + ") in '" + identifier + "'."); + } + } + else { + try { + onnx.proto = protobuf.roots.onnx.onnx; + model = onnx.proto.ModelProto.decode(context.buffer); + } + catch (error) { + throw new onnx.Error("File format is not onnx.ModelProto (" + error.message + ") in '" + identifier + "'."); + } + } + return onnx.Metadata.open(host).then((metadata) => { + try { + return new onnx.Model(metadata, model); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? 
error.message : error.toString(); + throw new onnx.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } +}; + +onnx.Model = class { + + constructor(metadata, model) { + this._graphs = []; + this._irVersion = model.ir_version; + this._producerName = model.producer_name; + this._producerVersion = model.producer_version; + this._domain = model.domain; + this._modelVersion = model.model_version; + this._description = model.doc_string; + this._metadata = []; + this._imports = null; + + const imports = {}; + if (model.opset_import && model.opset_import.length > 0) { + const results = []; + for (const opset_import of model.opset_import) { + let domain = opset_import.domain || 'ai.onnx'; + const result = domain + ' v' + opset_import.version; + if (!results.includes(result)) { + results.push(result); + } + domain = domain == 'ai.onnx' ? '' : domain; + if (!imports[domain] || imports[domain] > opset_import.version) { + imports[domain] = opset_import.version; + } + } + this._imports = results.join(', '); + } + if (Object.keys(imports).length == 0) { + imports[''] = 1; + imports['ai.onnx.ml'] = 1; + } + + let imageFormat = ''; + if (model.metadata_props) { + const imageMetadata = {}; + for (const metadata_prop of model.metadata_props) { + switch (metadata_prop.key) { + case 'author': + this._author = metadata_prop.value; + break; + case 'company': + this._company = metadata_prop.value; + break; + case 'converted_from': + this._converted_from = metadata_prop.value; + break; + case 'license': + this._license = metadata_prop.value; + break; + case 'license_url': + this._licenseUrl = metadata_prop.value; + break; + case 'Image.BitmapPixelFormat': + case 'Image.ColorSpaceGamma': + case 'Image.NominalPixelRange': + imageMetadata[metadata_prop.key] = metadata_prop.value; + break; + default: + this._metadata.push({ name: metadata_prop.key, value: metadata_prop.value}); + break; + } + } + imageFormat = [ imageMetadata['Image.BitmapPixelFormat'], 
imageMetadata['Image.ColorSpaceGamma'], imageMetadata['Image.NominalPixelRange'] ].filter((item) => item); + } + this._graphs = []; + if (model && model.graph) { + const graphMetadata = new onnx.GraphMetadata(metadata, imports); + const graph = new onnx.Graph(graphMetadata, imageFormat, model.graph); + this._graphs.push(graph); + } + } + + get format() { + return 'ONNX' + (this._irVersion ? ' v' + this._irVersion.toString() : ''); + } + + get imports() { + return this._imports; + } + + get producer() { + const producer = []; + if (this._producerName) { + producer.push(this._producerName); + } + if (this._producerVersion && this._producerVersion.length > 0) { + producer.push(this._producerVersion); + } + if (producer.length > 0) { + return producer.join(' '); + } + return null; + } + + get domain() { + return this._domain || null; + } + + get description() { + return this._description || null; + } + + get author() { + return this._author || null; + } + + get company() { + return this._company || null; + } + + get source() { + return this._converted_from || null; + } + + get license() { + const license = []; + if (this._license && this._license.length > 0) { + license.push(this._license); + } + if (this._licenseUrl && this._licenseUrl.length > 0) { + license.push('
    ' + this._licenseUrl + ''); + } + if (license.length > 0) { + return license; + } + return null; + } + + get metadata() { + return this._metadata; + } + + get graphs() { + return this._graphs; + } +}; + +onnx.Graph = class { + + constructor(metadata, imageFormat, graph) { + this._node = ''; + this._description = ''; + this._nodes = []; + this._inputs = []; + this._outputs = []; + + if (graph) { + this._name = graph.name || null; + this._description = graph.doc_string || ''; + + const initializers = new Map(); + for (const tensor of graph.initializer) { + initializers.set(tensor.name, new onnx.Tensor(tensor, 'Initializer')); + } + const nodes = []; + const inputCountMap = new Map(); + const outputCountMap = new Map(); + for (const node of graph.node) { + for (const input of node.input) { + inputCountMap.set(input, inputCountMap.has(input) ? inputCountMap.get(input) + 1 : 1); + } + for (const output of node.output) { + outputCountMap.set(output, inputCountMap.has(output) ? inputCountMap.get(output) + 1 : 1); + } + } + for (const input of graph.input) { + inputCountMap.delete(input); + } + for (const output of graph.output) { + outputCountMap.delete(output); + } + for (const node of graph.node) { + let initializerNode = false; + if (node.op_type == 'Constant' && node.input.length == 0 && node.output.length == 1) { + const name = node.output[0]; + if (inputCountMap.has(name) && inputCountMap.get(name) == 1 && + outputCountMap.has(name) && outputCountMap.get(name) == 1 && + node.attribute.length == 1) { + const attribute = node.attribute[0]; + if (attribute && attribute.name == 'value' && attribute.t) { + initializers.set(name, new onnx.Tensor(attribute.t, 'Constant')); + initializerNode = true; + } + } + } + if (!initializerNode) { + nodes.push(node); + } + } + + const args = new Map(); + const arg = (id, type, description, initializer, imageFormat) => { + if (!args.has(id)) { + args.set(id, new onnx.Argument(id, initializer ? initializer.type : type ? 
onnx.Tensor._formatType(type, imageFormat) : null, initializer, description)); + } + return args.get(id); + }; + + for (const valueInfo of graph.value_info) { + arg(valueInfo.name, valueInfo.type, valueInfo.doc_string, initializers.get(valueInfo.name), imageFormat); + } + for (const valueInfo of graph.input) { + const argument = arg(valueInfo.name, valueInfo.type, valueInfo.doc_string, initializers.get(valueInfo.name), imageFormat); + if (!initializers.has(valueInfo.name)) { + this._inputs.push(new onnx.Parameter(valueInfo.name, [ argument ])); + } + } + for (const valueInfo of graph.output) { + const argument = arg(valueInfo.name, valueInfo.type, valueInfo.doc_string, initializers.get(valueInfo.name), imageFormat); + this._outputs.push(new onnx.Parameter(valueInfo.name, [ argument ])); + } + for (const node of nodes) { + let inputs = []; + const schema = metadata.type(node.op_type); + if (node.input && node.input.length > 0) { + let inputIndex = 0; + if (schema && schema.inputs) { + for (const inputSchema of schema.inputs) { + if (inputIndex < node.input.length || inputSchema.option != 'optional') { + const inputCount = (inputSchema.option == 'variadic') ? 
(node.input.length - inputIndex) : 1; + const inputArguments = node.input.slice(inputIndex, inputIndex + inputCount).map((id) => { + return arg(id, null, null, initializers.get(id), imageFormat); + }); + inputIndex += inputCount; + inputs.push(new onnx.Parameter(inputSchema.name, inputArguments)); + } + } + } + else { + inputs = inputs.concat(node.input.slice(inputIndex).map((id, index) => { + return new onnx.Parameter((inputIndex + index).toString(), [ + arg(id, null, null, null, imageFormat) + ]); + })); + } + } + let outputs = []; + if (node.output && node.output.length > 0) { + let outputIndex = 0; + if (schema && schema.outputs) { + for (const outputSchema of schema.outputs) { + if (outputIndex < node.output.length || outputSchema.option != 'optional') { + const outputCount = (outputSchema.option == 'variadic') ? (node.output.length - outputIndex) : 1; + const outputArguments = node.output.slice(outputIndex, outputIndex + outputCount).map((id) => { + return arg(id, null, null, null, imageFormat); + }); + outputIndex += outputCount; + outputs.push(new onnx.Parameter(outputSchema.name, outputArguments)); + } + } + } + else { + outputs = outputs.concat(node.output.slice(outputIndex).map((id, index) => { + return new onnx.Parameter((outputIndex + index).toString(), [ + arg(id, null, null, null, imageFormat) + ]); + })); + } + } + this._nodes.push(new onnx.Node(metadata, imageFormat, node.op_type, node.domain, node.name, node.doc_string, node.attribute, inputs, outputs)); + } + } + } + + get name() { + return this._name; + } + + get description() { + return this._description; + } + + get groups() { + return false; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + toString() { + return 'graph(' + this.name + ')'; + } +}; + +onnx.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + 
+ get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +onnx.Argument = class { + + constructor(name, type, initializer, description) { + if (typeof name !== 'string') { + throw new onnx.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + this._description = description || ''; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get description() { + return this._description; + } + + get initializer() { + return this._initializer; + } +}; + +onnx.Node = class { + + constructor(metadata, imageFormat, type, domain, name, description, attributes, inputs, outputs) { + this._metadata = metadata; + this._type = type; + this._domain = domain || ''; + this._name = name || ''; + this._description = description || ''; + this._attributes = []; + if (attributes && attributes.length > 0) { + for (const attribute of attributes) { + this._attributes.push(new onnx.Attribute(this._metadata, imageFormat, this.type, attribute)); + } + } + this._inputs = inputs; + this._outputs = outputs; + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get description() { + return this._description; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get domain() { + return this._domain; + } + + get group() { + return null; + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } +}; + +onnx.Attribute = class { + + constructor(metadata, imageFormat, operator, attribute) { + this._name = attribute.name; + this._description = attribute.doc_string || ''; + this._type = null; + this._value = null; + + if (attribute.ints && attribute.ints.length > 0) { + this._value = attribute.ints; + } + else if (attribute.floats && attribute.floats.length > 0) { + 
this._value = attribute.floats; + } + else if (attribute.strings && attribute.strings.length > 0) { + this._value = attribute.strings.map((s) => onnx.Utility.decodeText(s)); + } + else if (attribute.graphs && attribute.graphs.length > 0) { + this._value = attribute.graphs.map((graph) => new onnx.Graph(metadata, imageFormat, graph)); + this._type = 'graph[]'; + } + else if (attribute.s && attribute.s.length > 0) { + this._value = onnx.Utility.decodeText(attribute.s); + } + else if (Object.prototype.hasOwnProperty.call(attribute, 'f')) { + this._value = attribute.f; + } + else if (Object.prototype.hasOwnProperty.call(attribute, 'i')) { + this._value = attribute.i; + } + else if (Object.prototype.hasOwnProperty.call(attribute, 't')) { + this._type = 'tensor'; + this._value = new onnx.Tensor(attribute.t).value; + } + else if (Object.prototype.hasOwnProperty.call(attribute, 'g')) { + this._type = 'graph'; + this._value = new onnx.Graph(metadata, imageFormat, attribute.g); + } + + const attributeSchema = metadata.attribute(operator, attribute.name); + if (!this._type) { + if (Object.prototype.hasOwnProperty.call(attribute, 'type')) { + if (!onnx.Attribute._attributeTypeMap) { + const map = {}; + map[onnx.proto.AttributeProto.AttributeType.UNDEFINED] = 'undefined'; + map[onnx.proto.AttributeProto.AttributeType.FLOAT] = 'float32'; + map[onnx.proto.AttributeProto.AttributeType.INT] = 'int64'; + map[onnx.proto.AttributeProto.AttributeType.STRING] = 'string'; + map[onnx.proto.AttributeProto.AttributeType.TENSOR] = 'tensor'; + map[onnx.proto.AttributeProto.AttributeType.GRAPH] = 'graph'; + map[onnx.proto.AttributeProto.AttributeType.FLOATS] = 'float32'; + map[onnx.proto.AttributeProto.AttributeType.INTS] = 'int64[]'; + map[onnx.proto.AttributeProto.AttributeType.STRINGS] = 'string[]'; + map[onnx.proto.AttributeProto.AttributeType.TENSORS] = 'tensor[]'; + map[onnx.proto.AttributeProto.AttributeType.GRAPHS] = 'graph[]'; + onnx.Attribute._attributeTypeMap = map; + } + const 
attributeType = onnx.Attribute._attributeTypeMap[attribute.type]; + this._type = attributeType || onnx.Attribute._attributeTypeMap[onnx.proto.AttributeProto.AttributeType.UNDEFINED]; + } + else if (attributeSchema && attributeSchema.type) { + this._type = attributeSchema.type; + } + } + + if (attributeSchema && Object.prototype.hasOwnProperty.call(attributeSchema, 'default') && attributeSchema.default) { + if (this._value == attributeSchema.default) { + this._visible = false; + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get description() { + return this._description; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +onnx.Tensor = class { + + constructor(tensor, kind) { + this._tensor = tensor; + this._name = tensor.name || ''; + this._kind = kind || null; + this._type = new onnx.TensorType(this._tensor.data_type, new onnx.TensorShape(this._tensor.dims.map((dim) => dim)), null); + + if (this._tensor.data_type == onnx.proto.TensorProto.DataType.FLOAT16 && this._tensor.int32_data && this._tensor.int32_data.length > 0) { + const array = new Uint8Array(this._tensor.int32_data.length << 1); + const dataView = new DataView(array.buffer, array.byteOffset, array.byteLength); + const data = this._tensor.int32_data; + for (let i = 0; i < data.length; i++) { + dataView.setUint16(i << 1, data[i], true); + } + this._tensor.raw_data = array; + delete this._tensor.int32_data; + } + } + + get name() { + return this._name; + } + + get kind() { + return this._kind; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state || null; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 
10000; + const value = this._decode(context, 0); + return onnx.Tensor._stringify(value, '', ' '); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + + if (!this._tensor.data_type) { + context.state = 'Tensor has no data type.'; + return context; + } + if (!this._tensor.dims) { + context.state = 'Tensor has no dimensions.'; + return context; + } + + if (this._tensor.data_location === onnx.proto.TensorProto.DataLocation.EXTERNAL) { + context.state = 'External data not implemented.'; + return context; + } + + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + + switch (this._tensor.data_type) { + case onnx.proto.TensorProto.DataType.FLOAT: + if (this._tensor.float_data && this._tensor.float_data.length > 0) { + context.data = this._tensor.float_data; + } + else if (this._tensor.raw_data && this._tensor.raw_data.length > 0) { + context.rawData = new DataView(this._tensor.raw_data.buffer, this._tensor.raw_data.byteOffset, this._tensor.raw_data.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case onnx.proto.TensorProto.DataType.DOUBLE: + if (this._tensor.double_data && this._tensor.double_data.length > 0) { + context.data = this._tensor.double_data; + } + else if (this._tensor.raw_data && this._tensor.raw_data.length > 0) { + context.rawData = new DataView(this._tensor.raw_data.buffer, this._tensor.raw_data.byteOffset, this._tensor.raw_data.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case onnx.proto.TensorProto.DataType.FLOAT16: + if (this._tensor.raw_data && this._tensor.raw_data.length > 0) { + context.rawData = new DataView(this._tensor.raw_data.buffer, this._tensor.raw_data.byteOffset, this._tensor.raw_data.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case onnx.proto.TensorProto.DataType.BOOL: + case onnx.proto.TensorProto.DataType.INT8: + case 
onnx.proto.TensorProto.DataType.UINT8: + case onnx.proto.TensorProto.DataType.INT16: + case onnx.proto.TensorProto.DataType.UINT16: + case onnx.proto.TensorProto.DataType.INT32: + if (this._tensor.int32_data && this._tensor.int32_data.length > 0) { + context.data = this._tensor.int32_data; + } + else if (this._tensor.raw_data && this._tensor.raw_data.length > 0) { + context.rawData = new DataView(this._tensor.raw_data.buffer, this._tensor.raw_data.byteOffset, this._tensor.raw_data.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case onnx.proto.TensorProto.DataType.UINT32: + if (this._tensor.uint64_data && this._tensor.uint64_data.length > 0) { + context.data = this._tensor.uint64_data; + } + else if (this._tensor.raw_data && this._tensor.raw_data.length > 0) { + context.rawData = new DataView(this._tensor.raw_data.buffer, this._tensor.raw_data.byteOffset, this._tensor.raw_data.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case onnx.proto.TensorProto.DataType.INT64: + if (this._tensor.int64_data && this._tensor.int64_data.length > 0) { + context.data = this._tensor.int64_data; + } + else if (this._tensor.raw_data && this._tensor.raw_data.length > 0) { + context.rawData = new DataView(this._tensor.raw_data.buffer, this._tensor.raw_data.byteOffset, this._tensor.raw_data.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case onnx.proto.TensorProto.DataType.UINT64: + if (this._tensor.uint64_data && this._tensor.uint64_data.length > 0) { + context.data = this._tensor.uint64_data; + } + else if (this._tensor.raw_data && this._tensor.raw_data.length > 0) { + context.rawData = new DataView(this._tensor.raw_data.buffer, this._tensor.raw_data.byteOffset, this._tensor.raw_data.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + default: + context.state = 'Tensor data type is not implemented.'; + break; + } + return context; + } + + 
_decode(context, dimension) { + const shape = context.shape.length !== 0 ? context.shape : [ 1 ]; + const results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + if (context.data) { + let value = context.data[context.index++]; + switch (this._tensor.data_type) { + case onnx.proto.TensorProto.DataType.BOOL: + value = value === 0 ? false : true; + break; + } + results.push(value); + context.count++; + } + else if (context.rawData) { + switch (this._tensor.data_type) { + case onnx.proto.TensorProto.DataType.FLOAT16: + results.push(context.rawData.getFloat16(context.index, true)); + context.index += 2; + context.count++; + break; + case onnx.proto.TensorProto.DataType.FLOAT: + results.push(context.rawData.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case onnx.proto.TensorProto.DataType.DOUBLE: + results.push(context.rawData.getFloat64(context.index, true)); + context.index += 8; + context.count++; + break; + case onnx.proto.TensorProto.DataType.INT8: + results.push(context.rawData.getInt8(context.index, true)); + context.index++; + context.count++; + break; + case onnx.proto.TensorProto.DataType.UINT8: + results.push(context.rawData.getUint8(context.index, true)); + context.index++; + context.count++; + break; + case onnx.proto.TensorProto.DataType.INT16: + results.push(context.rawData.getInt16(context.index, true)); + context.index += 2; + context.count++; + break; + case onnx.proto.TensorProto.DataType.UINT16: + results.push(context.rawData.getUint16(context.index, true)); + context.index += 2; + context.count++; + break; + case onnx.proto.TensorProto.DataType.INT32: + results.push(context.rawData.getInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case onnx.proto.TensorProto.DataType.UINT32: + results.push(context.rawData.getUint32(context.index, 
true)); + context.index += 4; + context.count++; + break; + case onnx.proto.TensorProto.DataType.INT64: + results.push(new long.Long(context.rawData.getUint32(context.index, true), context.rawData.getUint32(context.index + 4, true), false)); + context.index += 8; + context.count++; + break; + case onnx.proto.TensorProto.DataType.UINT64: + results.push(new long.Long(context.rawData.getUint32(context.index, true), context.rawData.getUint32(context.index + 4, true), true)); + context.index += 8; + context.count++; + break; + case onnx.proto.TensorProto.DataType.BOOL: + results.push(context.rawData.getInt8(context.index, true) === 0 ? false : true); + context.index += 1; + context.count++; + break; + } + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } + + static _stringify(value, indentation, indent) { + if (Array.isArray(value)) { + const result = []; + result.push(indentation + '['); + const items = value.map((item) => onnx.Tensor._stringify(item, indentation + indent, indent)); + if (items.length > 0) { + result.push(items.join(',\n')); + } + result.push(indentation + ']'); + return result.join('\n'); + } + if (typeof value == 'string') { + return indentation + value; + } + if (value == Infinity) { + return indentation + 'Infinity'; + } + if (value == -Infinity) { + return indentation + '-Infinity'; + } + if (isNaN(value)) { + return indentation + 'NaN'; + } + return indentation + value.toString(); + } + + static _formatElementType(elementType) { + if (!onnx.Tensor._elementTypeMap) { + const map = {}; + map[onnx.proto.TensorProto.DataType.UNDEFINED] = 'UNDEFINED'; + map[onnx.proto.TensorProto.DataType.FLOAT] = 'float32'; + map[onnx.proto.TensorProto.DataType.UINT8] = 'uint8'; + map[onnx.proto.TensorProto.DataType.INT8] = 'int8'; + 
map[onnx.proto.TensorProto.DataType.UINT16] = 'uint16'; + map[onnx.proto.TensorProto.DataType.INT16] = 'int16'; + map[onnx.proto.TensorProto.DataType.INT32] = 'int32'; + map[onnx.proto.TensorProto.DataType.INT64] = 'int64'; + map[onnx.proto.TensorProto.DataType.STRING] = 'string'; + map[onnx.proto.TensorProto.DataType.BOOL] = 'bool'; + map[onnx.proto.TensorProto.DataType.FLOAT16] = 'float16'; + map[onnx.proto.TensorProto.DataType.DOUBLE] = 'float64'; + map[onnx.proto.TensorProto.DataType.UINT32] = 'uint32'; + map[onnx.proto.TensorProto.DataType.UINT64] = 'uint64'; + map[onnx.proto.TensorProto.DataType.COMPLEX64] = 'complex64'; + map[onnx.proto.TensorProto.DataType.COMPLEX128] = 'complex128'; + map[onnx.proto.TensorProto.DataType.BFLOAT16] = 'bfloat16'; + onnx.Tensor._elementTypeMap = map; + } + const name = onnx.Tensor._elementTypeMap[elementType]; + if (name) { + return name; + } + return onnx.Tensor._elementTypeMap[onnx.proto.TensorProto.DataType.UNDEFINED]; + } + + static _formatType(type, imageFormat) { + if (!type) { + return null; + } + let denotation = ''; + switch (type.denotation) { + case 'TENSOR': + denotation = 'Tensor'; + break; + case 'IMAGE': + denotation = 'Image' + (imageFormat ? '(' + imageFormat.join(',') + ')' : ''); + break; + case 'AUDIO': + denotation = 'Audio'; + break; + case 'TEXT': + denotation = 'Text'; + break; + } + switch (type.value) { + case 'tensor_type': + case 'sparse_tensor_type': { + let shape = []; + if (type.tensor_type.shape && type.tensor_type.shape.dim) { + shape = type.tensor_type.shape.dim.map((dim) => { + return dim.dim_param ? 
dim.dim_param : dim.dim_value; + }); + } + return new onnx.TensorType(type.tensor_type.elem_type, new onnx.TensorShape(shape), denotation); + } + case 'map_type': { + return new onnx.MapType(type.map_type.key_type, onnx.Tensor._formatType(type.map_type.value_type, imageFormat), denotation); + } + case 'sequence_type': { + return new onnx.SequenceType(onnx.Tensor._formatType(type.sequence_type.elem_type, imageFormat), denotation); + } + case 'opaque_type': { + return new onnx.OpaqueType(type.opaque_type.domain, type.opaque_type.name); + } + } + return null; + } +}; + +onnx.TensorType = class { + + constructor(dataType, shape, denotation) { + this._dataType = onnx.Tensor._formatElementType(dataType); + this._shape = shape; + this._denotation = denotation || null; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + get denotation() { + return this._denotation; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +onnx.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.join(',') + ']'; + } +}; + +onnx.SequenceType = class { + + constructor(elementType, denotation) { + this._elementType = elementType; + this._denotation = denotation; + } + + get elementType() { + return this._elementType; + } + + get dennotation() { + return this._dennotation; + } + + toString() { + return 'sequence<' + this._elementType.toString() + '>'; + } +}; + +onnx.MapType = class { + + constructor(keyType, valueType, denotation) { + this._keyType = onnx.Tensor._formatElementType(keyType); + this._valueType = valueType; + this._denotation = denotation; + } + + get keyType() { + return this._keyType; + } + + get valueType() { + return this._valueType; + } + + get denotation() { + return this._denotation; + } + 
+ toString() { + return 'map<' + this._keyType + ',' + this._valueType.toString() + '>'; + } +}; + +onnx.OpaqueType = class { + + constructor(domain, name) { + this._domain = domain; + this._name = name; + } + + toString() { + const name = (this._domain ? (this._domain + '.') : '') + this._name; + return 'opaque<' + name + '>'; + } +}; + +onnx.GraphMetadata = class { + + constructor(metadata, imports) { + this._metadata = metadata; + this._imports = imports; + this._cache = new Map(); + this._attributeCache = new Map(); + } + + type(operator) { + if (!this._cache.has(operator)) { + this._cache.set(operator, this._metadata.type(operator, this._imports)); + } + return this._cache.get(operator); + } + + attribute(operator, name) { + const key = operator + ':' + name; + if (!this._attributeCache.has(key)) { + const schema = this.type(operator); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + this._attributeCache.set(operator + ':' + attribute.name, attribute); + } + } + if (!this._attributeCache.has(key)) { + this._attributeCache.set(key, null); + } + } + return this._attributeCache.get(key); + } +}; + +onnx.Metadata = class { + + static open(host) { + if (onnx.Metadata._metadata) { + return Promise.resolve(onnx.Metadata._metadata); + } + return host.request(null, 'onnx-metadata.json', 'utf-8').then((data) => { + onnx.Metadata._metadata = new onnx.Metadata(data); + return onnx.Metadata._metadata; + }).catch(() => { + onnx.Metadata._metadata = new onnx.Metadata(null); + return onnx.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + const name = item.name; + item.schema.name = name; + this._map[name] = this._map[name] || []; + this._map[name].push(item.schema); + } + } + } + } + } + + type(operator, imports) { + let result = null; + const schemas = 
this._map[operator]; + if (schemas) { + let version = -1; + for (const schema of schemas) { + const domain = schema.domain === 'ai.onnx' ? '' : schema.domain; + const importVersion = imports[domain]; + const sinceVersion = schema.since_version; + if (importVersion >= sinceVersion && version < sinceVersion) { + version = sinceVersion; + result = schema; + } + } + } + return result; + } +}; + +onnx.Utility = class { + + static decodeText(value) { + if (!value.some(c => c <= 32 || c >= 128)) { + onnx.Utility._asciiDecoder = onnx.Utility._asciiDecoder || new TextDecoder('ascii'); + return onnx.Utility._asciiDecoder.decode(value); + } + return [...value]; + } +}; + +onnx.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading ONNX model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = onnx.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/openvino-metadata.json b/frontend/packages/core/public/netron/openvino-metadata.json new file mode 100644 index 00000000..cc8cce35 --- /dev/null +++ b/frontend/packages/core/public/netron/openvino-metadata.json @@ -0,0 +1,1535 @@ +[ + { + "name": "Convolution", + "schema": { + "attributes": [ + { + "default": [ 1, null ], + "description": " *stride* is a distance (in pixels) to slide the filter on the feature map over the (x, y) axis. For example, *stride* equal \"1,1\" means sliding the filter 1 pixel at a time over the (x, y) axis.", + "name": "stride", + "option": "required", + "type": "int32[]" + }, + { + "default": 1, + "description": " *stride-x* is a distance (in pixels) to slide the filter on the feature map over the x axis. 
For example, *stride-x* equal 1 means sliding the filter 1 pixel at a time over the x axis.", + "name": "stride-x", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *stride-y* is a distance (in pixels) to slide the filter on the feature map over the y axis. For example, *stride-y* equal 1 means sliding the filter 1 pixel at a time over the y axis.", + "name": "stride-y", + "option": "required", + "type": "int32" + }, + { + "default": [ 1, null ], + "name": "strides", + "type": "int32[]" + }, + { + "default": 0, + "description": " *pad* is a number of pixels to add to the left and top of the input. For example, *pad* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad", + "option": "required", + "type": "int32" + }, + { + "default": 0, + "description": " *pad-x* is a number of pixels to add to the left of the input. For example, *pad-x* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-x", + "option": "required", + "type": "int32" + }, + { + "default": 0, + "description": " *pad-y* is a number of pixels to add to the top of the input. For example, *pad-y* equal 1 means adding 1 pixel to the top of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-y", + "option": "required", + "type": "int32" + }, + { + "default": 0, + "name": "pad-r", + "type": "int32" + }, + { + "default": 0, + "name": "pad-b", + "type": "int32" + }, + { + "default": [1, 1], + "description": " *kernel* is a width and height of each filter. For example, *kernel* equal 3 (3, 3) means that each filter has width and height equal to 3.", + "name": "kernel", + "option": "required", + "type": "int32[]" + }, + { + "default": 1, + "description": " *kernel-x* is a width of each filter. 
For example, *kernel* equal 3 means that each filter has width equal to 3.", + "name": "kernel-x", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-y* is a height of each filter. For example, *kernel-y* equal 3 means that each filter has height equal to 3.", + "name": "kernel-y", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *output* is a number of output feature maps per whole output (when *group* > 1, *output* still matches the number of output features regardless of *group* value). For example, *output* equals 1 means that there is 1 output feature map in a layer.", + "name": "output", + "option": "required", + "type": "int32", + "visible": false + }, + { + "default": 1, + "description": " *group* denotes the number of groups to which *output* and *input* should be split. For example, *group* equal 1 means that all the filters are applied to full input (usual convolution), *group* equals 2 means that both *input* and *output* channels are separated into 2 groups and *i-th output* group is connected to *i-th input* group channels. *group* equals number of output feature maps denotes depth-wise separable convolution ([Reference](https://medium.com/towards-data-science/types-of-convolutions-in-deep-learning-717013397f4d#6f51)).", + "name": "group", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *dilation* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal \"1,1\" means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. 
*dilation* equal \"2,2\" means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "name": "dilation-x", + "type": "int32" + }, + { + "default": [ 1, null ], + "name": "dilations", + "type": "int32[]" + }, + { + "default": "same_upper", + "name": "auto_pad" + }, + { + "default": [ 0, null ], + "name": "pads_begin", + "type": "int32[]" + }, + { + "default": [ 0, null ], + "name": "pads_end", + "type": "int32[]" + }, + { + "default": 1, + "description": " *dilation-y* denotes the distance in height between elements (weights) in the filter. For example, *dilation-y* equal 1 means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation-y* equal 2 means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation-y", + "option": "required", + "type": "int32" + } + ], + "category": "Layer", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/convolution.html)
    **Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#conv)\n**Parameters**: *Convolution* layer parameters should be specified in the `convolution_data` node, which is a child of the layer node.\n**Weights Layout** Weights layout is GOIYX, which means that *X* is changing the fastest, then *Y*, then *Input*, *Output*, then *Group*.\n**Mathematical Formulation**\n* For the convolutional layer, the number of output features in each dimension is calculated using the formula:\n\\f[\nn_{out} = \\left ( \\frac{n_{in} + 2p - k}{s} \\right ) + 1\n\\f]\n* The receptive field in each layer is calculated using the formulas:\n * Jump in the output feature map:\n \\f[\n j_{out} = j_{in} * s\n \\f]\n * Size of the receptive field of output feature:\n \\f[\n r_{out} = r_{in} + ( k - 1 ) * j_{in}\n \\f]\n * Center position of the receptive field of the first output feature:\n \\f[\n start_{out} = start_{in} + ( \\frac{k - 1}{2} - p ) * j_{in}\n \\f]\n * Output is calculated using the following formula:\n \\f[\n out = \\sum_{i = 0}^{n}w_{i}x_{i} + b\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n \n \n \n```", + "inputs": [ + { + "name": "inputs", + "option": "variadic" + }, + { + "description": "*(type: Tensor``)* List of input tensors.", + "name": "X1, X2, ..." + } + ], + "outputs": [ + { + "description": "*(type: Tensor``)* Concatenated tensor.", + "name": "concat_result" + }, + { + "description": "*(type: Tensor``)* The dimensions of the inputs.", + "name": "split_info" + } + ], + "support_level": "default" + } + }, + { + "name": "BinaryConvolution", + "schema": { + "category": "Layer" + } + }, + { + "name": "Pooling", + "schema": { + "attributes": [ + { + "default": [ 1, null ], + "description": " *stride* is a distance (in pixels) to slide the filter on the feature map over the (x, y) axis. 
For example, *stride* equal \"1,1\" means sliding the filter 1 pixel at a time over the (x, y) axis.", + "name": "stride", + "option": "required", + "type": "int32[]" + }, + { + "default": 1, + "description": " *stride-x* is a distance (in pixels) to slide the filter on the feature map over the x axis. For example, *stride-x* equal 1 means sliding the filter 1 pixel at a time over the x axis.", + "name": "stride-x", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *stride-y* is a distance (in pixels) to slide the filter on the feature map over the y axis. For example, *stride-y* equal 1 means sliding the filter 1 pixel at a time over the y axis.", + "name": "stride-y", + "option": "required", + "type": "int32" + }, + { + "default": [ 1, null ], + "name": "strides", + "type": "int32[]" + }, + { + "default": 1, + "description": " *pad* is a number of pixels to add to the left and top of the input. For example, *pad* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad", + "option": "required", + "type": "int32" + }, + { + "default": 0, + "description": " *pad-x* is a number of pixels to add to the left of the input. For example, *pad-x* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-x", + "option": "required", + "type": "int32" + }, + { + "default": 0, + "description": " *pad-y* is a number of pixels to add to the top of the input. For example, *pad-y* equal 1 means adding 1 pixel to the top of the input. 
Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-y", + "option": "required", + "type": "int32" + }, + { + "default": 0, + "name": "pad-r", + "type": "int32" + }, + { + "default": 0, + "name": "pad-b", + "type": "int32" + }, + { + "default": [ 0, null ], + "name": "pads_begin", + "type": "int32[]" + }, + { + "default": [ 0, null ], + "name": "pads_end", + "type": "int32[]" + }, + { + "description": " *kernel* is a width and height of each filter. For example, *kernel* equal 3 (3, 3) means that each filter has width and height equal to 3.", + "name": "kernel", + "option": "required", + "type": "int32[]" + }, + { + "default": 1, + "description": " *kernel-x* is a width of each filter. For example, *kernel* equal 3 means that each filter has width equal to 3.", + "name": "kernel-x", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-y* is a height of each filter. For example, *kernel-y* equal 3 means that each filter has height equal to 3.", + "name": "kernel-y", + "option": "required", + "type": "int32" + }, + { + "default": "max", + "description": " *pool-method* is a type of pooling strategy for values.", + "name": "pool-method", + "option": "required", + "type": "" + }, + { + "default": false, + "description": " *exclude-pad* is a type of pooling strategy for values in the padding area. 
For example, if *exclude-pad* is \"true\", zero-values in the padding are not used.", + "name": "exclude-pad", + "option": "required", + "type": "boolean" + }, + { + "default": "ceil", + "description": " *rounding_type* is a type of rounding to be applied.", + "name": "rounding-type", + "option": "required", + "type": "\n * *ceil*\n * *floor*" + } + ], + "category": "Pool", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/pooling.html)\n**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#pool)\n**Parameters**: Specify pooling layer parameters in the `pooling_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n* For *max pool-method*:\n \\f[\n output_{j} = MAX\\{ x_{0}, ... x_{i}\\}\n \\f]\n* For *avg pool-method*:\n \\f[\n output_{j} = \\frac{\\sum_{i = 0}^{n}x_{i}}{n}\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n \n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "ROIPooling", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *pooled_h* is a height of the ROI output feature map. For example, *pooled_h* equal 6 means that the height of the output of *ROIpooling* is 6.", + "name": "pooled_h", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *pooled_w* is a width of the ROI output feature map. For example, *pooled_w* equal 6 means that the width of the output of *ROIpooling* is 6.", + "name": "pooled_w", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *spatial_scale* is a ratio of the input feature map over the input image size.", + "name": "spatial_scale", + "option": "required", + "type": " positive floating point value" + } + ], + "category": "Layer", + "description": "**Short description**: It is a *pooling layer* with *max* pooling strategy (see *max* option in the *Pooling layer* parameters description). 
It is used over feature maps of non-uniform sizes and outputs another feature map of a fixed size.\n**Detailed description**: [deepsense.io reference](https://blog.deepsense.ai/region-of-interest-pooling-explained/)\n**Parameters**: Specify *ROIPooling* layer parameters in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\noutput_{j} = MAX\\{ x_{0}, ... x_{i}\\}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n \n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "FullyConnected", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *out-size* is a length of the output vector. For example, *out-size* equal 4096 means that the output vector length is 4096.", + "name": "out-size", + "option": "required", + "type": "int32" + } + ], + "category": "Layer", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/innerproduct.html)\n**Detailed description**: [Reference](http://cs231n.github.io/convolutional-networks/#fc)\n**Parameters**: Specify *FullyConnected* layer parameters in the `fc_data` node, which is a child of the layer node.\n**Weights Layout** OI, which means that Input is changing the fastest, then Output.\n**Mathematical Formulation**\n* If previous layer is *FullyConnected*:\n \\f[\n y_{i} = f( z_{i} ) \\quad with \\quad z_{i} = \\sum_{j=1}^{m_{1}^{( l-1 )}}w_{i,j}^{( l )}y_{i}^{ ( l -1 )}\n \\f]\n* Otherwise:\n \\f[\n y_{i} = f( z_{i} ) \\quad with \\quad z_{i}^{ ( l )} = \\sum_{j=1}^{m_{1}^{( l-1 )}}\\sum_{r=1}^{m_{2}^{ ( l-1 )}}\\sum_{s=1}^{m_{3}^{ ( l-1 )}}w_{i,j,r,s}^{ ( l )} ( Y_{i}^{ (l-1) })_{r,s}\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... 
\n \n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "ReLU", + "schema": { + "attributes": [ + { + "default": 0, + "description": " *negative_slope* is a multiplier, which is used if the unit is not active (that is negative). For example, *negative_slope* equal 0.1 means that an inactive unit value would be multiplied by 0.1 and this is the [Leaky ReLU](https://keras.io/layers/advanced-activations/#leakyrelu). If *negative_slope* is equal to 0, this is the usual *ReLU*.", + "name": "negative_slope", + "option": "required", + "type": "float64" + } + ], + "category": "Activation", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/relu.html)\n**Detailed description**: [Reference](https://github.com/Kulbear/deep-learning-nano-foundation/wiki/ReLU-and-Softmax-Activation-Functions#rectified-linear-units)\n**Parameters**: *ReLU* layer parameters can be (not mandatory) specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\nY_{i}^{( l )} = max(0, Y_{i}^{( l - 1 )})\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Activation", + "schema": { + "attributes": [ + { + "description": " *type* represents particular activation function. 
For example, *type* equal *sigmoid* means that neurons of this layer have a sigmoid activation function.", + "name": "type", + "option": "required" + }, + { + "default": 1.0, + "name": "alpha", + "type": "float32" + } + ], + "category": "Activation", + "description": "**Short description**: *Activation* layer represents an activation function of each neuron in a layer, which is used to add non-linearity to the computational flow.\n**Detailed description**: [Reference](https://medium.com/the-theory-of-everything/understanding-activation-functions-in-neural-networks-9491262884e0)\n**Parameters**: *Activation layer* parameters should be specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n* Sigmoid function:\n \\f[\n f( x ) = \\frac{1}{1+e^{-x}}\n \\f]\n* Tahn function:\n \\f[\n f ( x ) = \\frac{2}{1+e^{-2x}} - 1 = 2sigmoid(2x) - 1\n \\f]\n*\tElu function:\n\t\\f[\n f(x) = \\left\\{\\begin{array}{ll}\n\t\te^{x} - 1 \\quad \\mbox{if } x < 0 \\\\\n\t\tx \\quad \\mbox{if } x \\geq 0\n\t\\end{array}\\right.\n\t\\f]\n*\tRelu6 function:\n\t\\f[\n f(x) = min(max(0, x), 6)\n\t\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "SoftMax", + "schema": { + "attributes": [ + { + "description": " *axis* represents the axis of which the *SoftMax* is calculated. 
*axis* equal 1 is a default value.", + "name": "axis", + "option": "required", + "type": "int32" + } + ], + "category": "Activation", + "description": "**Short description**: [Reference](https://github.com/Kulbear/deep-learning-nano-foundation/wiki/ReLU-and-Softmax-Activation-Functions#softmax)\n**Detailed description**: [Reference](http://cs231n.github.io/linear-classify/#softmax)\n**Parameters**: *SoftMax* layer parameters can be (not mandatory) specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\ny_{c} = \\frac{e^{Z_{c}}}{\\sum_{d=1}^{C}e^{Z_{d}}}\n\\f]\nwhere \\f$C\\f$ is a number of classes\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Deconvolution", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *stride* is a distance (in pixels) to slide the filter on the feature map over the (x, y) axis. For example, *stride* equal \"1,1\" means sliding the filter 1 pixel at a time over the (x, y) axis.", + "name": "stride", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *stride-x* is a distance (in pixels) to slide the filter on the feature map over the x axis. For example, *stride-x* equal 1 means sliding the filter 1 pixel at a time over the x axis.", + "name": "stride-x", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *stride-y* is a distance (in pixels) to slide the filter on the feature map over the y axis. For example, *stride-y* equal 1 means sliding the filter 1 pixel at a time over the y axis.", + "name": "stride-y", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *pad* is a number of pixels to add to the left and top of the input. For example, *pad* equal 1 means adding 1 pixel to the left of the input. 
Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *pad-x* is a number of pixels to add to the left of the input. For example, *pad-x* equal 1 means adding 1 pixel to the left of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-x", + "option": "required", + "type": " int32" + }, + { + "default": 1, + "description": " *pad-y* is a number of pixels to add to the top of the input. For example, *pad-y* equal 1 means adding 1 pixel to the top of the input. Right and bottom padding should be calculated from the expected output width (height).", + "name": "pad-y", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *kernel* is a width and height of each filter. For example, *kernel* equal 3 (3, 3) means that each filter has width and height equal to 3.", + "name": "kernel", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-x* is a width of each filter. For example, *kernel* equal 3 means that each filter has width equal to 3.", + "name": "kernel-x", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *kernel-y* is a height of each filter. For example, *kernel-y* equal 3 means that each filter has height equal to 3.", + "name": "kernel-y", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *output* is a number of output feature maps per whole output (when *group* > 1, *output* still matches the number of output features regardless of *group* value). For example, *output* equals 1 means that there is 1 output feature map in a layer.", + "name": "output", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *group* denotes the number of groups to which *output* and *input* should be split. 
For example, *group* equal 1 means that all the filters are applied to full input (usual convolution), *group* equals 2 means that both *input* and *output* channels are separated into 2 groups and *i-th output* group is connected to *i-th input* group channels. *group* equals number of output feature maps denotes depth-wise separable convolution ([Reference](https://medium.com/towards-data-science/types-of-convolutions-in-deep-learning-717013397f4d#6f51)).", + "name": "group", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *dilation* denotes the distance in width and height between elements (weights) in the filter. For example, *dilation* equal \"1,1\" means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. *dilation* equal \"2,2\" means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *dilation-y* denotes the distance in height between elements (weights) in the filter. For example, *dilation-y* equal 1 means that all the elements in the filter are neighbors, so it is the same as for the usual convolution. 
*dilation-y* equal 2 means that all the elements in the filter are matched not to adjacent elements in the input matrix, but to those that are adjacent with distance 1.", + "name": "dilation-y", + "option": "required", + "type": "int32" + } + ], + "category": "Layer", + "description": "**Short description**: *Deconvolution* layer is applied for upsampling the output to the higher image resolution.\n**Detailed description**: [Reference](https://distill.pub/2016/deconv-checkerboard/)\n**Parameters**: *Deconvolution* layer parameters should be specified in the `deconvolution_data` node, which is a child of the layer node.\n**Parameters**: *Convolution* layer parameters should be specified in the `convolution_data` node, which is a child of the layer node.\n**Weights Layout** Weights layout is the following: GOIYX, which means that *X* is changing the fastest, then *Y*, then *Input*, *Output*, then *Group*.\n**Mathematical Formulation**\n*Deconvolution* is also called transpose convolution and performs operation, reverse to convolution.\nThe number of output features for each dimensions is calculated:\n\\f[S_{o}=stride(S_{i} - 1 ) + S_{f} - 2pad \\f]\nWhere \\f$S\\f$ is size of output, input and filter.\nOutput is calculated in the same way as for convolution layer:\n\\f[out = \\sum_{i = 0}^{n}w_{i}x_{i} + b\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Norm", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *alpha* represents the scaling parameter for the normalizing sum. For example, *alpha* equal 0.0001 means that the normalizing sum is multiplied by 0.0001.", + "name": "alpha", + "option": "required", + "type": " floating point positive number" + }, + { + "default": 1, + "description": " *beta* represents the exponent for the normalizing sum. 
For example, *beta* equal 0.75 means that the normalizing sum is raised to the power of 0.75.", + "name": "beta", + "option": "required", + "type": " floating point positive number" + }, + { + "default": 1, + "description": " *region* represents strategy of local regions extension. For example, *region* equal *across* means that the normalizing sum is performed over adjacent channels.", + "name": "region", + "option": "required", + "type": "" + }, + { + "default": 1, + "description": " *local-size* represents the side length of the region to be used for the normalization sum or number of channels depending on the strategy specified in the *region* parameter. For example, *local-size* equal 5 for the across strategy means application of sum across 5 adjacent channels.", + "name": "local-size", + "option": "required", + "type": " positive integer bigger than zero" + } + ], + "category": "Normalization", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/lrn.html)\n**Detailed description**: [Reference](http://yeephycho.github.io/2016/08/03/Normalizations-in-neural-networks/#Local-Response-Normalization-LRN)\n**Parameters**: *Norm* layer parameters should be specified in the `norm_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[o_{i} = \\left( 1 + \\left( \\frac{\\alpha}{n} \\right)\\sum_{i}x_{i}^{2} \\right)^{\\beta}\\f]\nWhere \\f$n\\f$ is the size of each local region.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Concat", + "schema": { + "attributes": [ + { + "description": " *axis* is the number of axis over which input blobs are concatenated. 
For example, *axis* equal 1 means that input blobs are concatenated over the first axis.", + "name": "axis", + "option": "required", + "type": "int32" + } + ], + "category": "Tensor", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/concat.html)\n**Parameters**: *Concat* layer parameters should be specified in the `concat_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Axis* parameter specifies a blob dimension to concat values. For example, for two input blobs *B1xC1xH1xW1* and *B2xC2xH2xW2* if axis: 1, output blob is: *B1xC1+C2xH1xW1*. This is only possible if *B1=B2*, *H1=H2*, *W1=W2*.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Split", + "schema": { + "attributes": [ + { "name": "axis", "type": "int32" } + ], + "category": "Tensor", + "description": "**Short description**: *Split* layer splits the input into several output groups. Group sizes are denoted by the number and the size of output ports.\n**Detailed description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/split.html)\n**Parameters**: *None*\n**Mathematical Formulation**\nSplits input blob among children. For example, blob is *BxC+CxHxW* and there are two children. Then, output blob is *BxCxHxW*.\n**Example**\n\n```html\n\n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Reshape", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *axis* is the number of the starting axis for reshape. For example, *axis* equal 1 means that *Reshape* replaces dimensions starting from the next after the first dimension.", + "name": "axis", + "option": "required", + "type": "int32" + }, + { + "description": " *dim* is a set of numbers separated with comma, which denote the dimensions of output blob. 
For example, *dim* equal 88,1,71 means that output blob gets following dimensions: first dimension equals 88, second dimension equals 1, third dimension equals 71. For more information, refer to the **Description** block. If *dim* is equal to two numbers, it performs [flattening](http://caffe.berkeleyvision.org/tutorial/layers/flatten.html).", + "name": "dim", + "option": "required", + "type": "int32[]" + }, + { + "default": 1, + "description": " *num_axes* is the number of dimensions to be replaced with a reshaped blob starting from the dimension number specified in *axis* property. For example, *num_axes* equal 2 means that 2 dimensions are replaced with reshaped blob.", + "name": "num_axes", + "option": "required", + "type": "int32" + } + ], + "category": "Shape", + "description": "**Short description**: *Reshape* layer changes dimensions of the input blob according to the specified order. Input blob volume is equal to output blob volume, where volume is the product of dimensions.\n**Detailed description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/reshape.html)\n**Parameters**: *Reshape* layer parameters should be specified in the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nIf you want to reshape input blob *BxCxHxW* into *Bx1x(C*H)xW*, the *dim* parameters of your layer should be:\n```html\n layer {\n name: \"reshape\"\n type: \"Reshape\"\n bottom: \"input\"\n top: \"output\"\n reshape_param {\n shape {\n dim: 0 # copy the dimension from below\n dim: 1\n dim: -1 # infer it from the other dimensions\n dim: 0\n }\n }\n }\n```\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Eltwise", + "schema": { + "attributes": [ + { + "default": "sum", + "description": " *operation* is the simple mathematical operation to be performed over inputs. 
For example, *operation* equal *mul* means that input blobs are multiplied.", + "name": "operation", + "option": "required", + "type": "string" + } + ], + "description": "**Short description**: *Eltwise* layer performs element-wise operation, which is specified in parameters, over given inputs.\n**Parameters**: *Eltwise* layer parameters should be specified in the `elementwise_data` node, which is placed as a child of the layer node.\n**Mathematical Formulation** *Eltwise* accepts 2 inputs of any number of dimensions - from 1 to 4, however, it is required for both of them to have absolutely same dimensions. The produced blob is also of the same dimension as each of its parents\n*Eltwise* does the following with the input blobs:\n\\f[\no_{i} = f(b_{i}^{1}, b_{i}^{2})\n\\f]\nwhere \\f$b_{i}^{1}\\f$ - first blob \\f$i\\f$-th element, \\f$b_{i}^{2}\\f$ - second blob \\f$i\\f$-th element, \\f$o_{i}\\f$ - output blob \\f$i\\f$-th element, \\f$f(a, b)\\f$ - is a function that performs an operation over its two arguments \\f$a, b\\f$.\n* For *sum* operation, \\f$f(a, b)\\f$ is defined as\n \\f[\n f(a,b) = a + b\n \\f]\n* For *mul* operation, \\f$f(a, b)\\f$ is defined as\n \\f[\n f(a,b) = a * b\n \\f]\n* For *max* operation, \\f$f(a, b)\\f$ is defined as\n \\f[\n f(a,b) = \\left\\{\\begin{array}{ll}\n\t\ta \\quad \\mbox{if } a \\geq b \\\\\n\t\tb \\quad \\mbox{if } b > a\n\t\\end{array}\\right. \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "ScaleShift", + "schema": { + "category": "Layer", + "attributes": [], + "description": "**Short description**: *ScaleShift* layer performs linear transformation of the input blobs. Weights denote scaling parameter, biases - a shift.\n**Parameters**: *ScaleShift* layer does not have additional parameters.\n**Mathematical Formulation**\n\\f[\no_{i} =\\gamma b_{i} + \\beta\n\\f]\n**Example**\n\n```\n\n ... \n ... 
\n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Crop", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *axis* is a number of a dimension to be used for cropping. For example, *axis* equal to 1 means that cropping is performed over the first dimension.", + "name": "axis", + "option": "required", + "type": " a list of unique integers, where each element is greater than or equal to 0 and less than input shape length." + }, + { + "default": 1, + "description": " *offset* denotes the starting point for crop in the input blob. For example, *offset* equal to 2 means that crop is starting from the second value in the given axis.", + "name": "offset", + "option": "required", + "type": " a list of integers of the length equal to the length of *axis* attribute. In the list, `offset[i]` is greater than or equal to 0 and less than or equal to `input_shape[axis[i]] - crop_size[axis[i]]`, where `crop_size` is the shape of the second input." + } + ], + "category": "Data", + "description": "**Short description**: *Crop* layer changes selected dimensions of the input blob according to the specified parameters.\n**Parameters**: *Crop* layer parameters should be specified in `data` section, which is placed as a child of the layer node. Due to various representation of Crop attributes in existing frameworks, this layer can be described in three independent ways: *Crop* **Type 1** layer takes two input blobs, and the shape of the second blob specifies the *Crop* size. The layer has two attributes: *axis* and *offset*. Crop layer takes two input blobs, and the shape of the second blob specifies the *Crop* size. 
The *Crop* layer of this type supports shape inference.\n**Inputs**\n* **1**: Multidimensional input blob *(for example, NCHW, NCH, or NC)*\n* **2**: Shape of this input will be used for crop\n**Example**\n\n```html\n\n \n \n \n 1\n 21\n 44\n 44\n \n \n 1\n 21\n 34\n 34\n \n \n \n \n 1\n 21\n 34\n 34\n \n \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "BatchNormalization", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *epsilon* is the number to be added to the variance to avoid division by zero when normalizing the value. For example, *epsilon* equal 0.001 means that 0.001 is added to the variance.", + "name": "epsilon", + "option": "required", + "type": "float32" + } + ], + "category": "Normalization", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/batchnorm.html)\n**Detailed description**: [Reference](https://kratzert.github.io/2016/02/12/understanding-the-gradient-flow-through-the-batch-normalization-layer.html)\n**Parameters**: *BatchNormalization* layer parameters should be specified as the `batch_norm_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*BatchNormalization* is the normalization of the output in each hidden layer.\n* **Input**: Values of \\f$x\\f$ over a mini-batch:\n \\f[\n \\beta = \\{ x_{1...m} \\}\n \\f]\n* **Parameters to learn**: \\f$ \\gamma, \\beta\\f$\n* **Output**:\n \\f[\n \\{ o_{i} = BN_{\\gamma, \\beta} ( b_{i} ) \\}\n \\f]\n* **Mini-batch mean**:\n \\f[\n \\mu_{\\beta} \\leftarrow \\frac{1}{m}\\sum_{i=1}^{m}b_{i}\n \\f]\n* **Mini-batch variance**:\n \\f[\n \\sigma_{\\beta }^{2}\\leftarrow \\frac{1}{m}\\sum_{i=1}^{m} ( b_{i} - \\mu_{\\beta} )^{2}\n \\f]\n* **Normalize**:\n \\f[\n \\hat{b_{i}} \\leftarrow \\frac{b_{i} - \\mu_{\\beta}}{\\sqrt{\\sigma_{\\beta }^{2} + \\epsilon }}\n \\f]\n* **Scale and shift**:\n \\f[\n o_{i} \\leftarrow \\gamma\\hat{b_{i}} + \\beta = BN_{\\gamma 
,\\beta } ( b_{i} )\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Normalize", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *across_spatial* is a flag that denotes if normalization is performed over CHW or HW. For example, *across_spatial* equals 0 means that normalization is not shared across channels.", + "name": "across_spatial", + "option": "required", + "type": "\n * 0\n * 1 - not supported" + }, + { + "default": 1, + "description": " *channel_shared* is a flag that denotes if scale parameters are shared across channels. For example, *channel_shared* equal 0 means that scale parameters are not shared across channels.", + "name": "channel_shared", + "option": "required", + "type": "\n * 0 - scale parameters are not shared across channels\n * 1 - not supported" + }, + { + "default": 1, + "description": " *eps* is the epsilon used to avoid division by zero when normalizing the value. For example, *eps* equals 0.001 means that 0.001 is used if all the values in normalization are equal to zero.", + "name": "eps", + "option": "required", + "type": "float32" + } + ], + "category": "Normalization", + "description": "**Short description**: *Normalize* layer performs l-p normalization of 1 of input blob.\n**Parameters**: *Normalize* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\no_{i} = \\sum_{i}^{H*W}\\frac{\\left ( n*C*H*W \\right )* scale}{\\sqrt{\\sum_{i=0}^{C*H*W}\\left ( n*C*H*W \\right )^{2}}}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Tile", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *axis* is the index of the axis to tile. 
For example, *axis* equals 3 means that fourth axis is used for tiling.", + "name": "axis", + "option": "required", + "type": "int32" + }, + { + "description": " *tiles* is a size of the specified axis in the output blob. For example, *tiles* equal 88 means that output blob gets 88 copies of data from specified axis.", + "name": "tiles", + "option": "required", + "type": "int32" + } + ], + "description": "**Short description**: *Tile* layer extends input blob with copies of data along specific axis.\n**Detailed description**: [Reference](http://caffe.help/manual/layers/tile.html)\n**Parameters**: *Tile* layer parameters should be specified as the `tile_data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Tile* extends input blobs and filling in output blobs following rules:\n\\f[\nout_i=input_i[inner\\_dim*t]\n\\f]\n\\f[\nt \\in \\left ( 0, \\quad tiles \\right )\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Permute", + "schema": { + "attributes": [ + { + "description": " *order* is the set of dimensions indexes for output blob. For example, *order* equal 0,2,3,1 means that the output blob has following dimensions: first dimension from the input blob, third dimension from the input blob, fourth dimension from the input blob, second dimension from the input blob.", + "name": "order", + "option": "required", + "type": "int32[]" + } + ], + "category": "Shape", + "description": "**Short description**: *Permute* layer performs reordering of input blob dimensions.\n**Detailed description**: [Reference](http://caffe.help/manual/layers/tile.html)\n**Parameters**: *Permute* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Permute* layer performs reordering input blob. 
Source indexes and destination indexes are bound by formula:\n\\f[\nsrc\\_ind_{offset} = n * ordered[1] * ordered[2] * ordered[3] + (h * ordered[3] + w)\n\\f]\n\\f[\nn \\in ( 0, order[0] )\n\\f]\n\\f[\nh \\in ( 0, order[2] )\n\\f]\n\\f[\nw \\in ( 0, order[3] )\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "PriorBox", + "schema": { + "attributes": [ + { + "name": "min_size", + "option": "required", + "type": "float32" + }, + { + "name": "max_size", + "option": "required", + "type": "float32" + }, + { + "default": 1, + "description": " *aspect_ratio* is a variance of aspect ratios. Duplicate values are ignored. For example, *aspect_ratio* equal 2.000000,3.000000 means that for the first box aspect_ratio is equal to 2 and for the second box - 3.", + "name": "aspect_ratio", + "option": "required", + "type": "float32" + }, + { + "default": false, + "description": " *flip* is a flag that denotes that each *aspect_ratio* is duplicated and flipped. For example, *flip* equals 1 and *aspect_ratio* equals 3 mean that aspect_ratio is equal to 1/3.", + "name": "flip", + "option": "required", + "type": "boolean" + }, + { + "default": false, + "description": " *clip* is a flag that denotes if each value in the output blob is within [0,1]. For example, *clip* equal 1 means that each value in the output blob is within [0,1].", + "name": "clip", + "option": "required", + "type": "boolean" + }, + { + "description": " *step* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85.", + "name": "step", + "option": "required", + "type": "float32" + }, + { + "default": 0.5, + "description": " *offset* is a shift of box respectively to top left corner. 
For example, *offset* equal 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "offset", + "option": "required", + "type": "float32" + }, + { + "description": " *variance* denotes a variance of adjusting bounding boxes. For example, *variance* equals 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "variance", + "option": "required", + "type": "float32[]" + }, + { + "default": 1, + "description": " *scale_all_sizes* is a flag that denotes type of inference. For example, *scale_all_sizes* equals 0 means that priorbox layer is inferd in MXNet-like manner. In particular, *max_size* parameter is ignored.", + "name": "scale_all_sizes", + "option": "required", + "type": "int32" + } + ], + "description": "**Short description**: *PriorBox* layer generates prior boxes of specified sizes and aspect ratios across all dimensions.\n**Parameters**: *PriorBox* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**:\n*PriorBox* computes coordinates of prior boxes by following:\n1. First calculates *center_x* and *center_y* of prior box:\n \\f[\n W \\equiv Width \\quad Of \\quad Image\n \\f]\n \\f[\n H \\equiv Height \\quad Of \\quad Image\n \\f]\n * If step equals 0:\n \\f[\n center_x=(w+0.5)\n \\f]\n \\f[\n center_y=(h+0.5)\n \\f]\n * else:\n \\f[\n center_x=(w+offset)*step\n \\f]\n \\f[\n center_y=(h+offset)*step\n \\f]\n \\f[\n w \\subset \\left( 0, W \\right )\n \\f]\n \\f[\n h \\subset \\left( 0, H \\right )\n \\f]\n2. Then, for each \\f$ s \\subset \\left( 0, min_sizes \\right ) \\f$ calculates coordinates of priorboxes:\n \\f[\n xmin = \\frac{\\frac{center_x - s}{2}}{W}\n \\f]\n \\f[\n ymin = \\frac{\\frac{center_y - s}{2}}{H}\n \\f]\n \\f[\n xmax = \\frac{\\frac{center_x + s}{2}}{W}\n \\f]\n \\f[\n ymin = \\frac{\\frac{center_y + s}{2}}{H}\n \\f]\n**Example**\n\n```html\n\n \n ... \n ... 
\n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "SimplerNMS", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *pre_nms_topn (post_nms_topn)* is the quantity of bounding boxes before (after) applying NMS operation. For example, *pre_nms_topn (post_nms_topn)* equals 15 means that the minimum (maximum) box size is 15.", + "name": "pre_nms_topn (post_nms_topn)", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *cls_threshold* is the minimum value of the proposal to be taken into consideration. For example, *cls_threshold* equal 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.", + "name": "cls_threshold", + "option": "required", + "type": "float32" + }, + { + "default": 1, + "description": " *iou_threshold* is the minimum ratio of boxes overlapping to be taken into consideration. For example, *iou_threshold* equal 0.7 means that all boxes with overlapping ratio less than 0.7 are filtered out.", + "name": "iou_threshold", + "option": "required", + "type": "float32" + }, + { + "default": 1, + "description": " *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal 16 means that all boxes are analyzed with the slide 16.", + "name": "feat_stride", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *min_bbox_size* is the minimum size of box to be taken into consideration. 
For example, *min_bbox_size* equal 35 means that all boxes with box size less than 35 are filtered out.", + "name": "min_bbox_size", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *scale* is array of scales for anchor boxes generating.", + "name": "scale", + "option": "required", + "type": "int32" + } + ], + "category": "Layer", + "description": "**Short description**: *SimplerNMS* layer performs filtering of bounding boxes and outputs only those with the highest confidence of prediction.\n**Parameters**: *SimplerNMS* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*SimplerNMS* accepts three inputs with four dimensions. Produced blob has two dimensions, the first one equals *post_nms_topn*.\n*SimplerNMS* does the following with the input blob:\n1. Generates initial anchor boxes. Left top corner of all boxes is (0, 0). Width and height of boxes are calculated based on scaled (according to the scale parameter) default widths and heights\n2. For each point in the first input blob:\n * pins anchor boxes to picture according to the second input blob, which contains four deltas for each box: for x and y of center, for width, and for height\n * finds out score in the first input blob\n3. Filters out boxes with size less than *min_bbox_size.*\n4. Sorts all proposals (*box, score*) by score from highest to lowest\n5. Takes top *pre_nms_topn* proposals\n6. Calculates intersections for boxes and filters out all with \\f$intersection/union > iou\\_threshold\\f$\n7. Takes top *post_nms_topn* proposals\n8. Returns top proposals\n**Example**\n\n```html\n\n \n ... \n ... 
\n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "DetectionOutput", + "schema": { + "attributes": [ + { + "default": 1, + "description": " number of classes to be predicted", + "name": "num_classes", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " background label id. If there is no background class, set it to -1.", + "name": "background_label_id", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " maximum number of results to be kept on NMS stage", + "name": "top_k", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " if \"true\", variance is encoded in target. Otherwise, we need to adjust the predicted offset accordingly.", + "name": "variance_encoded_in_target", + "option": "required", + "type": " logical values" + }, + { + "default": 1, + "description": " number of total bboxes to be kept per image after NMS step. -1 means keeping all bboxes after NMS step.", + "name": "keep_top_k", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": null, + "name": "num_orient_classes", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " type of coding method for bounding boxes. caffe.PriorBoxParameter.CENTER_SIZE and others.", + "name": "code_type", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " bounding boxes are shared among different classes.", + "name": "share_location", + "option": "required", + "type": " logical values" + }, + { + "default": 1, + "description": null, + "name": "interpolate_orientation", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " threshold to be used in NMS stage", + "name": "nms_threshold", + "option": "required", + "type": "float32" + }, + { + "default": 1, + "description": " only consider detections whose confidences are larger than a threshold. 
If not provided, consider all boxes.", + "name": "confidence_threshold", + "option": "required", + "type": "float32" + } + ], + "description": "**Short description**: *DetectionOutput* layer performs non-maximum suppression to generate the detection output using information on location and confidence predictions.\n**Detailed description**: [Reference](https://arxiv.org/pdf/1512.02325.pdf)\n**Parameters**: *DetectionOutput* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nAt each feature map cell, *DetectionOutput* predicts the offsets relative to the default box shapes in the cell, as well as the per-class scores that indicate the presence of a class instance in each of those boxes. Specifically, for each box out of k at a given location, *DetectionOutput* computes class scores and the four offsets relative to the original default box shape. This results in a total of \\f$(c + 4)k\\f$ filters that are applied around each location in the feature map, yielding \\f$(c + 4)kmn\\f$ outputs for a m \u00d7 n feature map.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Memory", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *id* is the id of the pair of *Memory* layers. For example, *id* equals r_27-28 means that layers with id 27 and 28 are in one pair.", + "name": "id", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *index* represents if the given layer is input or output. For example, *index* equal 0 means this layer is output one.", + "name": "index", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *size* represents the size of the group. 
For example, *size* equals 2 means this group is a pair.", + "name": "size", + "option": "required", + "type": "int32" + } + ], + "description": "**Short description**: *Memory* layer represents delay layer in terms of LSTM terminology. To read more about LSTM topologies please refer this [link](http://colah.github.io/posts/2015-08-Understanding-LSTMs).\n**Detailed description**: *Memory* layer saves state between two infer requests. In the topology, it is the single layer, however, in the Intermediate Representation, it is always represented as a pair of **Memory** layers. One of these layers does not have outputs and another does not have inputs (in terms of the Intermediate Representation).\n**Parameters**: *Memory* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Memory* save data from the input blob.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Clamp", + "schema": { + "attributes": [ + { + "default": 0, + "description": " *min* is the lower bound of values in the output shape. Any value in the input shape that is smaller than the bound, is replaced by the *min* value. For example, *min* equal 10 means that any value in the input shape that is smaller than the bound, is replaced by 10.", + "name": "min", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *max* is the upper bound of values in the output shape. Any value in the input shape that is greater than the bound, is replaced by the *max* value. 
For example, *max* equals 50 means that any value in the input shape that is greater than the bound, is replaced by 50.", + "name": "max", + "option": "required", + "type": "int32" + } + ], + "description": "**Short description**: *Clamp* layer represents clipping activation operation.\n**Detailed description**: [Reference](https://www.tensorflow.org/versions/r1.2/api_docs/MO_DG/prepare_model/python/tf/clip_by_value)\n**Parameters**: *Clamp* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Clamp* generally does the following with the input blobs:\n\\f[\nout_i=\\left\\{\\begin{array}{ll}\n\tmax\\_value \\quad \\mbox{if } \\quad input_i>max\\_value \\\\\n\tmin\\_value \\quad \\mbox{if } \\quad input_i\n\\end{array}\\right.\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "ArgMax", + "schema": { + "attributes": [ + { + "default": 1, + "description": " if *out_max_val* equals 1, output is a vector of pairs *(max_ind, max_val)*, unless axis is set. Then output is *max_val* along the specified axis.", + "name": "out_max_val", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " if *out_max_val* equals 1, output is a vector of pairs *(max_ind, max_val)*, unless axis is set. Then output is *max_val* along the specified axis.", + "name": "top_k", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " if set, maximizes along the specified axis, else maximizes the flattened trailing dimensions for each index of the first / num dimension.", + "name": "axis", + "option": "required", + "type": "int32" + } + ], + "description": "**Short description**: *ArgMax* layer computes the index of the *K* maximum values for each datum across all dimensions *CxHxW*.\n**Detailed description**: Intended for use after a classification layer to produce a prediction. 
If parameter *out_max_val* is set to \"true\", output is a vector of pairs *(max_ind, max_val)* for each image. The *axis* parameter specifies an axis along which to maximize.\n**Parameters**: *ArgMax* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*ArgMax* generally does the following with the input blobs:\n\\f[\no_{i} = \\left\\{\nx| x \\in S \\wedge \\forall y \\in S : f(y) \\leq f(x)\n\\right\\}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "PSROIPooling", + "schema": { + "attributes": [ + { + "default": 1, + "description": " pooled output channel number", + "name": "output_dim", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " number of groups to encode position-sensitive score maps", + "name": "group_size", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " multiplicative spatial scale factor to translate ROI coordinates from their input scale to the scale used when pooling", + "name": "spatial_scale", + "option": "required", + "type": "float32" + } + ], + "category": "Pool", + "description": "**Short description**: *PSROIPooling* layer compute position-sensitive max pooling on regions of interest specified by input, takes as input N position-sensitive score maps and a list of R regions of interest.\n**Detailed description**: [Reference](https://arxiv.org/pdf/1703.06211.pdf)\n**Parameters**: *PSRoiPooling* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nThe output value for \\f$(i, j)\\f$-th bin is obtained by summation from one score map \\f$x_{i,j}\\f$ corresponding to that bin. 
In short, the difference from *RoIPooling* is that a general feature map \\f$x\\f$ is replaced by a specific positive-sensitive score map \\f$x_{i,j}\\f$.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "GRN", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *bias* is added to the variance.", + "name": "bias", + "option": "required", + "type": "float32" + } + ], + "category": "Normalization", + "description": "**Short description**: *GRN* is Global Response Normalization with L2 norm (across channels only).\n**Parameters**: GRN layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*GRN* computes L2 norm by channels for input blob. *GRN* generally does the following with the input blob:\n\\f[\noutput_{i} = \\frac{input_{i}}{\\sqrt{\\sum_{i}^{C} input_{i}}}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "PReLU", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *channel_shared* shows if negative slope shared across channels or not.", + "name": "channel_shared", + "option": "required", + "type": "int32" + }, + { + "description": " *filler_type* defines initialization type for negative slope.", + "name": "filler_type", + "option": "required", + "type": "string" + }, + { + "default": 1, + "description": " *filler_value* defines the value in constant filler.", + "name": "filler_value", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *min(max)* defines the minimal(maximal) value in uniform filler.", + "name": "min(max)", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *mean* defines the mean value in Gaussian filler.", + "name": "mean", + "option": "required", + "type": "int32" + } + ], + 
"category": "Activation", + "description": "**Short description**: *PReLU* is the Parametric Rectifier Linear Unit. The difference from *ReLU* is that negative slopes can vary across channels.\n**Parameters**: *PReLU* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*PReLU* accepts one input with four dimensions. The produced blob has the same dimensions as input.\n*PReLU* does the following with the input blob:\n\\f[\no_{i} = max(0, x_{i}) + w_{i} * min(0,x_{i})\n\\f]\nwhere \\f$w_{i}\\f$ is from weights blob.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "RegionYolo", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *coords* is num coordinates for each region", + "name": "coords", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *classes* is num classes for each region", + "name": "classes", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *num* is number of regions", + "name": "num", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *do_softmax* is a flag which specifies the method of infer", + "name": "do_softmax", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *anchors* coordinates regions", + "name": "anchors", + "option": "required", + "type": "float32[]" + }, + { + "default": 1, + "description": " *mask* specifies which anchors to use", + "name": "mask", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *mask* specifies which anchors to use", + "name": "mask", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *axis* is the number of the dimension from which flattening is performed. 
For example, *axis* equals 1 means that flattening is started from the 1st dimension.", + "name": "axis", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *end_axis* is the number of the dimension on which flattening is ended. For example, *end_axis* equals -1 means that flattening is performed till the last dimension.", + "name": "end_axis", + "option": "required", + "type": "int32" + } + ], + "category": "Layer", + "description": "**Short description**: *RegionYolo* computes coordinates of regions with probability for each class.\n**Detailed description**: [Reference][p_yolo]\n**Parameters**: *RegionYolo* layer parameters should be specified as the `data` node, which is a child of the `layer` node.\n**Example**\n\n```html\n\n \n ... \n ... \n \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "ReorgYolo", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *stride* is distance of cut throws in output blobs.", + "name": "stride", + "option": "required", + "type": "int32" + } + ], + "category": "Layer", + "description": "**Short description**: *ReorgYolo* reorganizes input blob taking into account strides.\n**Detailed description**: [Reference][p_yolo]\n**Parameters**: *ReorgYolo* layer parameters should be specified as the `data` node, which is a child of the `layer` node.\n**Example**\n\n```html\n\n \n ... \n ... \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "PriorBoxClustered", + "schema": { + "attributes": [ + { + "description": " *width* is a parameter that specifies desired boxes widths in pixels.", + "name": "width", + "option": "required", + "type": "float32[]" + }, + { + "name": "height", + "option": "required", + "type": "float32[]" + }, + { + "default": false, + "description": " *clip* is a flag that denotes if each value in the output blob is within [0,1]. 
For example, *clip* equal 1 means that each value in the output blob is within [0,1].", + "name": "clip", + "option": "required", + "type": "boolean" + }, + { + "default": false, + "description": " *flip* is a flag that denotes whether the list of boxes is augmented with the flipped ones.", + "name": "flip", + "option": "required", + "type": "boolean" + }, + { + "description": " *step* is a distance between box centers. For example, *step* equal 85 means that the distance between neighborhood prior boxes centers is 85.", + "name": "step", + "option": "required", + "type": "float32" + }, + { + "name": "step_w", + "option": "required", + "type": "float32" + }, + { + "name": "step_h", + "option": "required", + "type": "float32" + }, + { + "default": 1, + "description": " *offset* is a shift of box respectively to top left corner. For example, *offset* equal 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "offset", + "option": "required", + "type": "float32" + }, + { + "description": " *variance* denotes a variance of adjusting bounding boxes. For example, *variance* equal 85 means that the shift of neighborhood prior boxes centers is 85.", + "name": "variance", + "option": "required", + "type": "float32[]" + }, + { + "description": " *img_h* specifies height of input image. These parameters are calculated unless provided explicitly.", + "name": "img_h", + "option": "required", + "type": "float32" + }, + { + "name": "img_w", + "option": "required", + "type": "float32" + } + ], + "description": "**Short description**: *PriorBoxClustered* layer generates prior boxes of specified sizes.\n**Parameters**: *PriorBoxClustered* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*PriorBoxClustered* computes coordinates of prior boxes by following:\n1. 
Calculates the *center_x* and *center_y* of prior box:\n \\f[\n W \\equiv Width \\quad Of \\quad Image\n \\f]\n \\f[\n H \\equiv Height \\quad Of \\quad Image\n \\f]\n \\f[\n center_x=(w+offset)*step\n \\f]\n \\f[\n center_y=(h+offset)*step\n \\f]\n \\f[\n w \\subset \\left( 0, W \\right )\n \\f]\n \\f[\n h \\subset \\left( 0, H \\right )\n \\f]\n2. For each \\f$s \\subset \\left( 0, W \\right )\\f$ calculates the prior boxes coordinates:\n \\f[\n xmin = \\frac{center_x - \\frac{width_s}{2}}{W}\n \\f]\n\t\\f[\n\tymin = \\frac{center_y - \\frac{height_s}{2}}{H}\n\t\\f]\n\t\\f[\n\txmax = \\frac{center_x - \\frac{width_s}{2}}{W}\n\t\\f]\n\t\\f[\n\tymax = \\frac{center_y - \\frac{height_s}{2}}{H}\n\t\\f]\nIf *clip* is defined, the coordinates of prior boxes are recalculated with the formula:\n\\f$coordinate = \\min(\\max(coordinate,0), 1)\\f$\n**Example**\n\n```html\n\n \n \n ...\n \n \n ...\n \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "MVN", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *across_channels* is a flag that denotes if mean values are shared across channels. For example, *across_channels* equal 0 means that mean values are not shared across channels.", + "name": "across_channels", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *normalize_variance* is a flag that denotes whether to perform variance normalization.", + "name": "normalize_variance", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *eps* is the number to be added to the variance to avoid division by zero when normalizing the value. 
For example, *epsilon* equal 0.001 means that 0.001 is added to the variance.", + "name": "eps", + "option": "required", + "type": "float32" + } + ], + "category": "Normalization", + "description": "**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/mvn.html)\n**Parameters**: *MVN* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*MVN* subtracts mean from the input blob:\n\\f[\no_{i} = i_{i} - \\frac{\\sum{i_{k}}}{C * H * W}\n\\f]\nIf *normalize_variance* is set to 1, the output blob is divided by variance:\n\\f[\no_{i}=\\frac{o_{i}}{\\sum \\sqrt {o_{k}^2}+\\epsilon}\n\\f]\n**Example**\n\n```html\n\n \n \n ...\n \n \n ...\n \n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "CTCGreadyDecoder", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *ctc_merge_repeated* is a flag for collapsing the repeated labels during the ctc calculation.", + "name": "ctc_merge_repeated", + "option": "required", + "type": "int32" + } + ], + "category": "Layer", + "description": "**Short description**: *CTCGreadyDecoder* performs greedy decoding on the logits given in input (best path).\n**Detailed description**: [Reference](https://www.tensorflow.org/api_docs/python/tf/nn/ctc_greedy_decoder)\n**Parameters**: *CTCGreadyDecoder* layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\nGiven an input sequence \\f$X\\f$ of length \\f$T\\f$, *CTCGreadyDecoder* assumes the probability of a length \\f$T\\f$ character sequence \\f$C\\f$ is given by\n\\f[\np(C|X) = \\prod_{t=1}^{T} p(c_{t}|X)\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... 
\n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Proposal", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *pre_nms_topn (post_nms_topn)* is the quantity of bounding boxes before (after) applying NMS operation. For example, *pre_nms_topn (post_nms_topn)* equal 15 means that the minimum (maximum) box size is 15.", + "name": "pre_nms_topn (post_nms_topn)", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *nms_thresh* is the minimum value of the proposal to be taken into consideration. For example, *nms_thresh* equal 0.5 means that all boxes with prediction probability less than 0.5 are filtered out.", + "name": "nms_thresh", + "option": "required", + "type": "float32" + }, + { + "default": 1, + "description": " *feat_stride* is the step size to slide over boxes (in pixels). For example, *feat_stride* equal 16 means that all boxes are analyzed with the slide 16.", + "name": "feat_stride", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *min_size* is the minimum size of box to be taken into consideration. 
For example, *min_size* equal 35 means that all boxes with box size less than 35 are filtered out.", + "name": "min_size", + "option": "required", + "type": "int32" + }, + { + "default": 1, + "description": " *ratio* is the ratios for anchor generation.", + "name": "ratio", + "option": "required", + "type": "float32[]" + }, + { + "default": 1, + "description": " *ratio* is the ratios for anchor generation.", + "name": "ratio", + "option": "required", + "type": "float32[]" + }, + { + "default": 1, + "description": " *scale* is the scales for anchor generation.", + "name": "scale", + "option": "required", + "type": "float32[]" + } + ], + "category": "Layer", + "description": "**Short description**: *Proposal* layer performs filtering of only those bounding boxes and outputs with the highest confidence of prediction.\n**Parameters**: Proposal layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n*Proposal* layer accepts three inputs with four dimensions. The produced blob has two dimensions: first one equals *batch_size * post_nms_topn*.\n*Proposal* does the following with the input blob:\n1. Generates initial anchor boxes Left top corner of all boxes in (0, 0). Width and height of boxes are calculated from *base_size* with scale and ratio parameters\n2. For each point in the first input blob:\n * pins anchor boxes to the image according to the second input blob that contains four deltas for each box: for *x* and *y* of center, for *width* and for *height*\n * finds out score in the first input blob\n3. Filters out boxes with size less than *min_size*\n4. Sorts all proposals (*box*, *score*) by score from highest to lowest\n5. Takes top *pre_nms_topn* proposals\n6. Calculates intersections for boxes and filter out all with \\f$intersection/union > nms\\_thresh\\f$\n7. Takes top *post_nms_topn* proposals\n8. Returns top proposals\n**Example**\n\n```html\n\n \n ... \n ... 
\n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Resample", + "schema": { + "attributes": [ + { + "default": 1, + "description": " *type* parameter specifies type of blob interpolation.", + "name": "type", + "option": "required", + "type": "\n * *LINEAR* - linear blob interpolation\n * *CUBIC* - cubic blob interpolation\n * *NEAREST* - nearest-neighbor blob interpolation" + }, + { + "default": 1, + "description": " *antialias* is a flag that denotes whether to perform anti-aliasing.", + "name": "antialias", + "option": "required", + "type": "\n * 0 - anti-aliasing is not performed\n * 1 - anti-aliasing is performed" + } + ], + "category": "Layer", + "description": "**Short description**: *Resample* layer scales the input blob by the specified parameters.\n**Parameters**: Resample layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Example**\n\n```html\n\n \n \n ...\n \n \n ...\n \n\u200b\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Power", + "schema": { + "attributes": [], + "description": "**Short description**: *Power* layer computes the output as (shift + scale * x) ^ power for each input element x.\n**Parameters**: Power layer parameters should be specified as the `data` node, which is a child of the layer node.\n**Mathematical Formulation**\n\\f[\np = (shift + scale * x)^{power}\n\\f]\n**Example**\n\n```html\n\n \n ... \n ... 
\n\n```", + "inputs": null, + "outputs": null, + "support_level": "default" + } + }, + { + "name": "Flatten", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32" }, + { "name": "end_axis", "type": "int32", "default": -1 } + ] + } + }, + { + "name": "Pad", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "pad_value", "type": "float32" }, + { "name": "pads_begin", "type": "int32[]" }, + { "name": "pads_end", "type": "int32[]" }, + { "name": "pad_mode" } + ] + } + }, + { + "name": "GRUCell", + "schema": { + "category": "Layer" + } + }, + { + "name": "LSTMCell", + "schema": { + "category": "Layer" + } + }, + { + "name": "MaxPool", + "schema": { + "category": "Pool" + } + }, + { + "name": "Transpose", + "schema": { + "category": "Transform" + } + }, + { + "name": "Squeeze", + "schema": { + "category": "Transform" + } + }, + { + "name": "Unsqueeze", + "schema": { + "category": "Transform" + } + }, + { + "name": "Gather", + "schema": { + "category": "Transform" + } + } +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/openvino.js b/frontend/packages/core/public/netron/openvino.js new file mode 100644 index 00000000..a6a5a892 --- /dev/null +++ b/frontend/packages/core/public/netron/openvino.js @@ -0,0 +1,1113 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var openvino = openvino || {}; +var base = base || require('./base'); +var long = long || { Long: require('long') }; + +openvino.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'xml') { + if (context.text.includes(' 6 && signature.every((v, i) => v == buffer[i])) { + return false; + } + if (buffer.length > 4) { + const signature = buffer[0] | buffer[1] << 8 | buffer[2] << 16 | buffer [3] << 24; + if (signature === 0x00000000 || signature === 0x00000001 || + signature === 
0x01306B47 || signature === 0x000D4B38 || signature === 0x0002C056) { + return false; + } + } + return true; + } + return false; + } + + open(context, host) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + switch (extension) { + case 'xml': + return context.request(identifier.substring(0, identifier.length - 4) + '.bin', null).then((bin) => { + return this._openModel(identifier, host, context.text, bin); + }).catch(() => { + return this._openModel(identifier, host, context.text, null); + }); + case 'bin': + return context.request(identifier.substring(0, identifier.length - 4) + '.xml', 'utf-8').then((xml) => { + return this._openModel(identifier, host, xml, context.buffer); + }).catch((error) => { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new openvino.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + }); + } + } + + _openModel(identifier, host, xml, bin) { + return openvino.Metadata.open(host).then((metadata) => { + try { + let errors = false; + const parser = new DOMParser({ errorHandler: () => { errors = true; } }); + const xmlDoc = parser.parseFromString(xml, 'text/xml'); + if (errors || xmlDoc.documentElement == null || xmlDoc.getElementsByTagName('parsererror').length > 0) { + throw new openvino.Error("File format is not OpenVINO."); + } + if (!xmlDoc.documentElement || xmlDoc.documentElement.nodeName != 'net') { + throw new openvino.Error("File format is not OpenVINO IR."); + } + const net = openvino.XmlReader.read(xmlDoc.documentElement); + const model = new openvino.Model(metadata, net, bin); + if (net.disconnectedLayers) { + host.exception(new openvino.Error("Graph contains not connected layers " + JSON.stringify(net.disconnectedLayers) + " in '" + identifier + "'.")); + } + return model; + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? 
error.message : error.toString(); + throw new openvino.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } +}; + +openvino.Model = class { + + constructor(metadata, net, bin) { + this._name = net.name || ''; + this._graphs = [ new openvino.Graph(metadata, net, bin) ]; + } + + get name() { + return this._name; + } + + get format() { + return 'OpenVINO IR'; + } + + get graphs() { + return this._graphs; + } + +}; + +openvino.Graph = class { + + constructor(metadata, net, bin) { + this._name = net.name || ''; + this._nodes = []; + this._inputs = []; + this._outputs = []; + this._arguments = {}; + + for (const layer of this._const(net.layers, net.edges)) { + const inputs = layer.inputs.map((input) => this._argument(layer.id, layer.precision, input, net.edges)); + const outputs = layer.outputs.map((output) => this._argument(layer.id, output.precision || layer.precision, output, null)); + switch (layer.type) { + case 'Input': { + const name = layer.name || ''; + // precision is a part of OpenVINO IR layers of IR v6 and earlier + // in IR v7 and newer the port is no longer an attribute of the layer but of each output port + // IR input is not just a placeholder, it is conceptually the legitimate layer + // in order not to break compatibility with the overall approach + // with openvino.Parameter for inputs and openvino.Node for outputs + // input openvino.Node would be stored as an optional attribute of openvino.Parameter + this._inputs.push(new openvino.Parameter(name, outputs)); + break; + } + default: { + this._nodes.push(new openvino.Node(this, metadata, bin, layer, inputs, outputs)); + break; + } + } + } + + this._replaceTensorIteratorWithSubgraph(metadata, bin, net.layers, net.edges); + delete this._arguments; + + // Validation + // all graph elements are split between inputs and nodes + // by definition IR is a graph can have inputs of two types: "Input" and "Const" + // "Input" layers are already moved to inputs when we parse a graph + // 
if there are any layers that do not have input arguments and they are no Const ones + // this means that this graph was not properly processed by the graph building logic + const outputSet = new Set(); + for (const node of this._nodes) { + for (const output of node.outputs) { + for (const argument of output.arguments) { + outputSet.add(argument.name); + } + } + } + for (const input of this.inputs) { + for (const argument of input.arguments) { + outputSet.add(argument.name); + } + } + const nodesWithNonExistentInputs = new Set(); + for (const node of this._nodes) { + for (const input of node.inputs) { + for (const argument of input.arguments) { + if (!argument.initializer && !outputSet.has(argument.name)) { + nodesWithNonExistentInputs.add(node); + } + } + } + } + if (nodesWithNonExistentInputs.size !== 0){ + net.disconnectedLayers = Array.from(nodesWithNonExistentInputs).map((node) => node.name); + } + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + _argument(layer, precision, port, map) { + let id = layer + ':' + port.id; + if (map) { + id = map[id]; + } + let argument = this._arguments[id]; + if (!argument) { + const shape = port.dims.length == 0 ? 
null : new openvino.TensorShape(port.dims); + argument = new openvino.Argument(id, new openvino.TensorType(precision, shape), null); + } + return argument; + } + + _replaceTensorIteratorWithSubgraph(metadata, bin, layers, edges) { + const tensorIteratorLayers = layers.filter((node) => node.type === 'TensorIterator'); + for (const tensorIteratorLayer of tensorIteratorLayers) { + const singleTensorIteratorNodeId = tensorIteratorLayer.id; + const tiNode = this._nodes.find((n) => n._id === singleTensorIteratorNodeId); + const iteratorLayers = tensorIteratorLayer.body.layers; + const iteratorEdgeMap = tensorIteratorLayer.body.edges; + const iteratorBackEdgesMap = tensorIteratorLayer.back_edges; + const iteratorAllEdges = Object.assign({}, iteratorEdgeMap, iteratorBackEdgesMap); + const mappingForNestedIR = tensorIteratorLayer.port_map; + for (const nestedLayer of this._const(iteratorLayers, iteratorAllEdges, iteratorBackEdgesMap)) { + const inputs = nestedLayer.inputs.map((input) => this._argument(nestedLayer.id, nestedLayer.precision, input, iteratorAllEdges)); + const outputs = nestedLayer.outputs.map((output) => this._argument(nestedLayer.id, nestedLayer.precision || output.precision, output, null)); + const nestedNode = new openvino.Node(this, metadata, bin, nestedLayer, inputs, outputs); + nestedNode._id = singleTensorIteratorNodeId + '_' + nestedLayer.id; + for (const input of nestedNode._inputs) { + for (const input_argument of input.arguments) { + // we had a argument with id: 0:1 - meaning from layer "0" and its port "1" + // now as we rename all internal nodes to have an id of the TI included + // e.g. 
internal layer with id "0" and TI with id "14" results in internal layer to get id "14_0" + if (input_argument.name){ + input_argument._name = singleTensorIteratorNodeId + '_' + input_argument.name; + } + } + } + + for (const output of nestedNode._outputs) { + for (const output_argument of output.arguments) { + // we had a argument with id: 1:1 - meaning from me with id "1" and my port "1" + // now as we rename all internal nodes to have an id of the TI included + // e.g. my layer with id "1" and TI with id "14" results in internal layer to get id "14_1" + if (output_argument.name){ + output_argument._name = singleTensorIteratorNodeId + '_' + output_argument.name; + } + } + } + + this._nodes.push(nestedNode); + } + + // We know for sure that edges that appeared in the nested IR are not aware of the external context + for (const nestedInput of mappingForNestedIR.input) { + const nestedNode = this._nodes.find((n) => n._id === singleTensorIteratorNodeId + '_' + nestedInput.internal_layer_id); + + const candidate_edge = edges[singleTensorIteratorNodeId + ':' + nestedInput.external_port_id]; + if (candidate_edge) { + const parts = candidate_edge.split(':'); + const parentLayerID = parts[0]; + const parentPortID = parts[1]; + const parentNode = this._nodes.find((n) => n._id === parentLayerID); + if (!parentNode) { + // its parent is a TensorIterator that was removed on the previous cycle + // information is still present in the inputs of the current TensorIterator node + const potentialParentInput = tiNode._inputs.find((tiInput) => tiInput._name === 'input'); + if (!potentialParentInput) { + return; + } + const inputWithoutId = nestedNode._inputs.find((input) => { + return Boolean(input.arguments.find((argument) => !argument.name)); + }); + if (inputWithoutId) { + const argumentWithoutId = inputWithoutId.arguments.find((argument) => !argument.name); + if (argumentWithoutId){ + argumentWithoutId._name = potentialParentInput.arguments[0].name; + } + } + } + else { + if 
(!nestedNode._inputs){ + throw new openvino.Error("Tensor Iterator node with name '" + nestedNode._id + "' does not have inputs."); + } + + const newId = parentLayerID + ':' + parentPortID; + const inputWithoutId = nestedNode._inputs.find((input) => { + return Boolean(input.arguments.find((argument) => !argument.name)); + }); + if (inputWithoutId) { + const argumentWithoutId = inputWithoutId._arguments.find((argument) => !argument._name); + if (argumentWithoutId){ + argumentWithoutId._name = newId; + } + } + else { + // TODO: no tensor information in the new argument - passed as null for now + nestedNode._inputs.push(new openvino.Parameter((nestedNode._inputs.length + 1).toString(), [ + new openvino.Argument(newId, null, null) + ])); + } + } + } + } + + for (const nestedOutput of mappingForNestedIR.output) { + const nestedNode = this._nodes.find((n) => n._id === `${singleTensorIteratorNodeId}_${nestedOutput.internal_layer_id}`); + const toEdge = singleTensorIteratorNodeId + ':' + nestedOutput.external_port_id; + const candidate_edges = Object.keys(edges).filter((key) => edges[key] === toEdge); + for (const candidate_edge of candidate_edges) { + const childLayerID = candidate_edge.split(':')[0]; + const child = this._nodes.find((layer) => layer._id === childLayerID); + if (!child._inputs || (child._inputs && child._inputs.length === 0)){ + continue; + } + if (nestedNode._outputs && nestedNode._outputs[0]) { + for (const child_input of child._inputs) { + for (const argument of child_input._arguments) { + if (!argument.name || (argument.name && argument.name.split(':')[0] !== singleTensorIteratorNodeId)) { + continue; + } + const myPort = nestedNode.outputs[0].arguments[0].name.split(':')[1]; + argument._name = nestedNode.id + ':' + myPort; + } + } + } + } + } + + this._nodes = this._nodes.filter((node) => node.id !== tensorIteratorLayer.id); + } + } + + _const(layers, edges, back_edges) { + const results = []; + back_edges = back_edges || {}; + layers = 
layers.slice(); + for (const layer of layers) { + if (layer.type === 'Const' && layer.inputs.length === 0 && layer.outputs.length === 1 && + layer.blobs.length === 0 && layer.data && layer.data.length > 3) { + const data = {}; + for (const attribute of layer.data) { + data[attribute.name] = attribute.value; + } + if (data['element_type'] && data['offset'] && data['size']) { + const element_type = data['element_type']; + let precision = null; + switch (element_type) { + case 'f16': precision = 'FP16'; break; + case 'f32': precision = 'FP32'; break; + default: precision = element_type.toUpperCase(); + } + const shape = data['shape'] ? data['shape'].split(',').map((dim) => parseInt(dim.trim(), 10)) : null; + layer.data = []; + layer.blobs.push({ name: 'custom', precision: precision, offset: parseInt(data['offset'], 10), size: parseInt(data['size'], 10), shape: shape }); + } + } + if (layer.type === 'Const' && layer.blobs.length === 1 && !layer.blobs[0].shape && + layer.inputs.length === 0 && layer.outputs.length === 1 && layer.outputs[0].dims) { + layer.blobs[0].shape = layer.outputs[0].dims; + } + } + + const constMap = new Map(); + for (const layer of layers) { + if (layer.type === 'Const' && layer.inputs.length === 0 && layer.outputs.length === 1) { + const from = layer.id + ':' + layer.outputs[0].id; + constMap.set(from, { layer: layer, counter: 0 }); + } + } + for (const to of Object.keys(edges)) { + const from = edges[to]; + if (constMap.has(from)) { + constMap.get(from).counter++; + } + } + if (back_edges) { + for (const to of Object.keys(back_edges)) { + const from = back_edges[to]; + if (constMap.has(from)) { + constMap.get(from).counter++; + } + } + } + for (const pair of constMap) { + if (pair[1].counter !== 1) { + constMap.delete(pair[0]); + } + } + for (const layer of layers) { + if (layer.blobs.length === 0) { + for (let i = layer.inputs.length - 1; i > 0; i--) { + const input = layer.inputs[i]; + const to = layer.id + ':' + input.id; + const from = 
edges[to] || back_edges[to]; + if (!constMap.has(from)) { + break; + } + const constLayer = constMap.get(from).layer; + const blob = constLayer.blobs[0]; + if (blob) { + blob.id = constLayer.name || constLayer.id; + blob.kind = 'Const'; + layer.blobs.push(blob); + layer.inputs.splice(i, 1); + constMap.get(from).layer = null; + constMap.get(from).delete = true; + } + } + } + } + + while (layers.length > 0) { + const layer = layers.shift(); + if (layer.type === 'Const' && layer.inputs.length === 0 && layer.outputs.length === 1) { + const from = layer.id + ':' + layer.outputs[0].id; + if (constMap.has(from) && constMap.get(from).delete) { + continue; + } + } + results.push(layer); + } + + return results; + } +}; + +openvino.Node = class { + + constructor(graph, metadata, bin, layer, inputs, outputs) { + this._metadata = metadata; + this._type = layer.type; + this._name = layer.name || ''; + this._id = layer.id; + this._inputs = []; + this._outputs = []; + this._initializers = []; + this._attributes = []; + const precision = layer.precision; + let inputIndex = 0; + for (const input of inputs) { + const inputName = (inputIndex == 0) ? 'input' : inputIndex.toString(); + this._inputs.push(new openvino.Parameter(inputName, [ input ])); + inputIndex++; + } + let outputIndex = 0; + for (const output of outputs) { + const outputName = (outputIndex == 0) ? 'output' : outputIndex.toString(); + this._outputs.push(new openvino.Parameter(outputName, [ output ])); + outputIndex++; + } + const attributes = {}; + for (const attribute of layer.data) { + attributes[attribute.name] = attribute.value; + const attributeSchema = metadata.attribute(this.type, attribute.name); + this._attributes.push(new openvino.Attribute(attributeSchema, attribute.name, attribute.value)); + } + for (const blob of layer.blobs) { + const name = blob.name; + const offset = blob.offset; + const size = blob.size; + const data = (bin && (offset + size) <= bin.length) ? 
bin.slice(offset, offset + size) : null; + let dimensions = blob.shape || null; + const kind = blob.kind || 'Blob'; + const id = blob.id || ''; + const dataType = blob.precision || precision; + const precisionMap = { + 'FP16': 2, 'FP32': 4, + 'I8': 1, 'I16': 2, 'I32': 4, 'I64': 8, + 'U8': 1, 'U16': 2, 'U32': 4, 'U64': 8 + }; + const itemSize = precisionMap[dataType]; + if (itemSize) { + switch (this._type + ':' + name) { + case 'FullyConnected:weights': { + const outSize = parseInt(attributes['out-size'], 10); + dimensions = [ size / (outSize * itemSize), outSize ]; + break; + } + case 'FullyConnected:biases': { + dimensions = [ parseInt(attributes['out-size'], 10) ]; + break; + } + case 'Convolution:weights': + case 'Deconvolution:weights': { + const c = this.inputs[0].arguments[0].type.shape.dimensions[1]; + const group = parseInt(attributes['group'] || '1', 10); + const kernel = attributes['kernel-x'] && attributes['kernel-y'] ? + [ parseInt(attributes['kernel-x'], 10), parseInt(attributes['kernel-y'], 10) ] : + attributes['kernel'].split(',').map((v) => parseInt(v.trim(), 10)); + const n = parseInt(attributes['output'], 10); + dimensions = [ Math.floor(c / group), n ].concat(kernel); + break; + } + case 'ScaleShift:weights': + case 'ScaleShift:biases': + case 'Convolution:biases': + case 'Normalize:weights': + case 'PReLU:weights': { + dimensions = [ Math.floor(size / itemSize) ]; + break; + } + case 'Const:custom': { + if (this._outputs.length > 0 && + this._outputs[0].arguments.length > 0 && + this._outputs[0].arguments[0].type && + this._outputs[0].arguments[0].type.shape && + this._outputs[0].arguments[0].type.shape.dimensions) { + dimensions = this._outputs[0].arguments[0].type.shape.dimensions; + } + break; + } + } + } + const shape = dimensions ? 
new openvino.TensorShape(dimensions) : null; + this._initializers.push(new openvino.Parameter(name, [ + new openvino.Argument(id, null, new openvino.Tensor(dataType, shape, data, kind)) + ])); + } + } + + get id() { + return this._id; + } + + get name() { + return this._name; + } + + get device() { + return this._device || ''; + } + + get type() { + return this._type; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs.concat(this._initializers); + } + + get outputs() { + return this._outputs; + } +}; + +openvino.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +openvino.Argument = class { + + constructor(name, type, initializer) { + // if (typeof name !== 'string') { + // throw new openvino.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + // } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +openvino.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + if (schema) { + if (Object.prototype.hasOwnProperty.call(schema, 'type')) { + this._type = schema.type; + switch (schema.type) { + case 'boolean': + switch (value) { + case '1': + case 'true': + this._value = true; + break; + case '0': + case 'false': + this._value = false; + break; + } + break; + case 'int32': { + const intValue = Number.parseInt(this._value, 10); + this._value = Number.isNaN(this._value - intValue) ? 
value : intValue; + break; + } + case 'float32': + case 'float64': { + const floatValue = Number.parseFloat(this._value); + this._value = Number.isNaN(this._value - floatValue) ? value : floatValue; + break; + } + case 'int32[]': + if (this._value.length > 2) { + let ints = []; + this._value.split(',').map((item) => { + item = item.trim(); + const intValue = Number.parseInt(item, 10); + if (Number.isNaN(item - intValue)) { + ints = null; + } + else if (ints != null) { + ints.push(intValue); + } + }); + if (ints != null) { + this._value = ints; + } + } + break; + case 'float32[]': + if (this._value.length > 2) { + let floats = []; + this._value.split(',').map((item) => { + item = item.trim(); + const floatValue = Number.parseFloat(item); + if (Number.isNaN(item - floatValue)) { + floats = null; + } + else if (floats != null) { + floats.push(floatValue); + } + }); + if (floats != null) { + this._value = floats; + } + } + break; + } + } + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && schema.visible == false) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + let defaultValue = schema.default; + if (this._value == defaultValue) { + this._visible = false; + } + else if (Array.isArray(this._value) && Array.isArray(defaultValue)) { + defaultValue = defaultValue.slice(0, defaultValue.length); + if (defaultValue.length > 1 && defaultValue[defaultValue.length - 1] == null) { + defaultValue.pop(); + while (defaultValue.length < this._value.length) { + defaultValue.push(defaultValue[defaultValue.length - 1]); + } + } + if (this._value.every((item, index) => { return item == defaultValue[index]; })) { + this._visible = false; + } + } + } + } + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get type() { + return this._type; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +openvino.Tensor = class { + + constructor(precision, shape, data, kind) { + this._data = data; + this._type = new openvino.TensorType(precision, shape); + this._kind = kind; + } + + get kind() { + return this._kind; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return openvino.Tensor._stringify(value, '', ' '); + } + + _context() { + const context = {}; + context.state = null; + + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + + if (!this._type.shape) { + context.state = 'Tensor shape is not defined.'; + return context; + } + + context.index = 0; + context.count = 0; + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + + return context; + } + + _decode(context, dimension) { + const shape = context.shape.length == 0 ? 
[ 1 ] : context.shape; + const results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (this._type.dataType) { + case 'float32': + results.push(context.data.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'float16': + results.push(context.data.getFloat16(context.index, true)); + context.index += 2; + context.count++; + break; + case 'int8': + results.push(context.data.getInt8(context.index)); + context.index += 1; + context.count++; + break; + case 'int16': + results.push(context.data.getInt16(context.index, true)); + context.index += 2; + context.count++; + break; + case 'int32': + results.push(context.data.getInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'int64': + results.push(new long.Long(context.data.getUint32(context.index, true), context.data.getUint32(context.index + 4, true), false)); + context.index += 8; + context.count++; + break; + case 'uint8': + results.push(context.data.getUint8(context.index)); + context.index += 1; + context.count++; + break; + case 'uint16': + results.push(context.data.getUint16(context.index, true)); + context.index += 2; + context.count++; + break; + case 'uint32': + results.push(context.data.getUint32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'uint64': + results.push(new long.Long(context.data.getUint32(context.index, true), context.data.getUint32(context.index + 4, true), true)); + context.index += 8; + context.count++; + break; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } + + static _stringify(value, indentation, 
indent) { + if (Array.isArray(value)) { + const result = []; + result.push(indentation + '['); + const items = value.map((item) => openvino.Tensor._stringify(item, indentation + indent, indent)); + if (items.length > 0) { + result.push(items.join(',\n')); + } + result.push(indentation + ']'); + return result.join('\n'); + } + if (typeof value == 'string') { + return indentation + value; + } + if (value == Infinity) { + return indentation + 'Infinity'; + } + if (value == -Infinity) { + return indentation + '-Infinity'; + } + if (isNaN(value)) { + return indentation + 'NaN'; + } + return indentation + value.toString(); + } +}; + +openvino.TensorType = class { + + constructor(precision, shape) { + precision = precision ? precision.toLowerCase() : precision; + switch (precision) { + case 'f16': this._dataType = 'float16'; break; + case 'fp16': this._dataType = 'float16'; break; + case 'f32': this._dataType = 'float32'; break; + case 'fp32': this._dataType = 'float32'; break; + case 'i8': this._dataType = 'int8'; break; + case 'i16': this._dataType = 'int16'; break; + case 'i32': this._dataType = 'int32'; break; + case 'i64': this._dataType = 'int64'; break; + case 'u1': this._dataType = 'boolean'; break; + case 'u8': this._dataType = 'uint8'; break; + case 'u16': this._dataType = 'uint16'; break; + case 'u32': this._dataType = 'uint32'; break; + case 'u64': this._dataType = 'uint64'; break; + case 'bool': this._dataType = 'boolean'; break; + case '': this._dataType = '?'; break; + case null: this._dataType = '?'; break; + default: throw new openvino.Error("Unknown precision '" + JSON.stringify(precision) + "'."); + } + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + if (this._shape == null) { + return this.dataType + '[?]'; + } + return this.dataType + this._shape.toString(); + } +}; + +openvino.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + 
} + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.join(',') + ']'; + } +}; + +openvino.Metadata = class { + + static open(host) { + if (openvino.Metadata._metadata) { + return Promise.resolve(openvino.Metadata._metadata); + } + return host.request(null, 'openvino-metadata.json', 'utf-8').then((data) => { + openvino.Metadata._metadata = new openvino.Metadata(data); + return openvino.Metadata._metadata; + }).catch(() => { + openvino.Metadata._metadata = new openvino.Metadata(null); + return openvino.Metadata._metadata; + }); + } + + constructor(data) { + this._map = new Map(); + this._attributeMap = new Map(); + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item && item.name && item.schema) { + if (this._map.has(item.name)) { + throw new openvino.Error("Duplicate metadata key '" + item.name + "'."); + } + item.schema.name = item.name; + this._map.set(item.name, item.schema); + } + } + } + } + } + + type(name) { + return this._map.get(name) || null; + } + + attribute(type, name) { + const key = type + ':' + name; + if (!this._attributeMap.has(key)) { + this._attributeMap.set(key, null); + const schema = this.type(type); + if (schema && schema.attributes) { + for (const attribute of schema.attributes) { + this._attributeMap.set(type + ':' + attribute.name, attribute); + } + } + } + return this._attributeMap.get(key); + } +}; + +openvino.XmlReader = class { + + static read(element) { + const children = (parent, name) => { + const children = []; + let child = parent.firstChild; + while (child != null) { + if (child.nodeType == 1 && child.nodeName == name) { + children.push(child); + } + child = child.nextSibling; + } + return children; + }; + const child = (parent, name) => { + const elements = children(parent, name); + if (elements.length > 1) { + throw new openvino.Error("Element '" + 
parent.nodeName + "' has multiple '" + name + "' elements."); + } + return elements.length > 0 ? elements[0] : null; + }; + const ports = (parent, name) => { + const elements = child(parent, name); + if (elements) { + return children(elements, 'port').map((element) => { + return { + id: element.getAttribute('id'), + precision: element.getAttribute('precision'), + dims: Array.prototype.slice.call(element.getElementsByTagName('dim')).map((dim) => parseInt(dim.textContent.trim(), 10)) + }; + }); + } + return []; + }; + const layers = (parent) => { + const elements = child(parent, 'layers'); + if (elements) { + return children(elements, 'layer').map((element) => { + const data = child(element, 'data'); + const blobs = child(element, 'blobs'); + const layer = { + id: element.getAttribute('id'), + name: element.getAttribute('name'), + type: element.getAttribute('type'), + precision: element.getAttribute('precision'), + data: !data ? [] : Array.from(data.attributes).map((attribute) => { + return { name: attribute.name, value: attribute.value}; + }), + blobs: !blobs ? 
[] : Array.from(blobs.childNodes).filter((node) => node.nodeType === 1).map((blob) => { + return { + name: blob.nodeName, + precision: blob.getAttribute('precision'), + offset: parseInt(blob.getAttribute('offset'), 10), + size: parseInt(blob.getAttribute('size'), 10) + }; + }), + inputs: ports(element, 'input'), + outputs: ports(element, 'output'), + }; + if (layer.type === 'TensorIterator') { + layer.back_edges = edges(element, 'back_edges'); + const body = child(element, 'body'); + if (body) { + layer.body = { + layers: layers(body), + edges: edges(body) + }; + } + const port_map = child(element, 'port_map'); + if (port_map) { + layer.port_map = { input: [], output: [] }; + for (const port of Array.from(port_map.childNodes).filter((element) => element.nodeType === 1)) { + const item = { + axis: port.getAttribute("axis"), + external_port_id: port.getAttribute("external_port_id"), + internal_layer_id: port.getAttribute("internal_layer_id"), + internal_port_id: port.getAttribute("internal_port_id") + }; + switch (port.nodeName) { + case 'input': layer.port_map.input.push(item); break; + case 'output': layer.port_map.output.push(item); break; + } + } + } + } + return layer; + }); + } + return []; + }; + const edges = (parent, name) => { + const map = {}; + const elements = child(parent, name || 'edges'); + if (elements) { + for (const element of children(elements, 'edge')) { + const fromLayer = element.getAttribute('from-layer'); + const fromPort = element.getAttribute('from-port'); + const toLayer = element.getAttribute('to-layer'); + const toPort = element.getAttribute('to-port'); + map[toLayer + ':' + toPort] = fromLayer + ':' + fromPort; + } + } + return map; + }; + return { + name: element.getAttribute('name'), + batch: element.getAttribute('batch'), + version: element.getAttribute('version'), + layers: layers(element), + edges: edges(element) + }; + } +}; + +openvino.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error 
loading OpenVINO model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = openvino.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/paddle-metadata.json b/frontend/packages/core/public/netron/paddle-metadata.json new file mode 100644 index 00000000..0f79f4a2 --- /dev/null +++ b/frontend/packages/core/public/netron/paddle-metadata.json @@ -0,0 +1,116 @@ +[ + { + "name": "conv2d", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "workspace_size_MB", "default": 4096 }, + { "name": "fuse_residual_connection", "default": false }, + { "name": "fuse_eltwise", "default": false }, + { "name": "fuse_relu", "default": false }, + { "name": "data_format", "default": "AnyLayout" }, + { "name": "groups", "default": 1 }, + { "name": "paddings", "default": [0, 0] }, + { "name": "dilations", "default": [1, 1] }, + { "name": "strides", "default": [1, 1] } + ] + } + }, + { + "name": "depthwise_conv2d", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "workspace_size_MB", "default": 4096 }, + { "name": "fuse_residual_connection", "default": false }, + { "name": "data_format", "default": "AnyLayout" }, + { "name": "groups", "default": 1 }, + { "name": "fuse_relu", "default": false } + ] + } + }, + { + "name": "relu", + "schema": { + "category": "Activation" + } + }, + { + "name": "softmax", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "data_format", "default": "AnyLayout" } + ] + } + }, + { + "name": "batch_norm", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "momentum", "default": 0.8999999761581421 }, + { "name": "epsilon", "default": 9.999999747378752e-06 }, + { "name": "fuse_with_relu", "default": false }, + { "name": "data_layout", "default": "NCHW" } + ] + } + }, + { + "name": "pool2d", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "data_format", "default": "AnyLayout" }, + { "name": 
"ceil_mode", "default": false }, + { "name": "global_pooling", "default": false }, + { "name": "exclusive", "default": true }, + { "name": "pooling_type", "default": "max" }, + { "name": "paddings", "default": [0, 0] } + ] + } + }, + { + "name": "elementwise_add", + "schema": { + "attributes": [ + { "name": "axis", "default": -1 } + ] + } + }, + { + "name": "concat", + "schema": { + "category": "Tensor" + } + }, + { + "name": "reshape", + "schema": { + "category": "Shape" + } + }, + { + "name": "reshape2", + "schema": { + "category": "Shape" + } + }, + { + "name": "lrn", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "alpha", "default": 9.999999747378752e-05 }, + { "name": "beta", "default": 0.75 }, + { "name": "k", "default": 1 } + ] + } + }, + { + "name": "pad2d", + "schema": { + "category": "Tensor" + } + } +] diff --git a/frontend/packages/core/public/netron/paddle-proto.js b/frontend/packages/core/public/netron/paddle-proto.js new file mode 100644 index 00000000..29b9565f --- /dev/null +++ b/frontend/packages/core/public/netron/paddle-proto.js @@ -0,0 +1,1029 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.paddle || ($protobuf.roots.paddle = {}); + + $root.paddle = (function() { + + var paddle = {}; + + paddle.framework = (function() { + + var framework = {}; + + framework.proto = (function() { + + var proto = {}; + + proto.Version = (function() { + + function Version(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Version.prototype.version = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + + Version.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.paddle.framework.proto.Version(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Version; + })(); + + proto.AttrType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "INT"] = 0; + values[valuesById[1] = "FLOAT"] = 1; + values[valuesById[2] = "STRING"] = 2; + values[valuesById[3] = "INTS"] = 3; + values[valuesById[4] = "FLOATS"] = 4; + values[valuesById[5] = "STRINGS"] = 5; + values[valuesById[6] = "BOOLEAN"] = 6; + values[valuesById[7] = "BOOLEANS"] = 7; + values[valuesById[8] = "BLOCK"] = 8; + values[valuesById[9] = "LONG"] = 9; + values[valuesById[10] = "BLOCKS"] = 10; + values[valuesById[11] = "LONGS"] = 11; + return values; + })(); + + proto.OpDesc = (function() { + + function OpDesc(properties) { + this.inputs = []; + this.outputs = []; + this.attrs = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OpDesc.prototype.type = ""; + OpDesc.prototype.inputs = $util.emptyArray; + OpDesc.prototype.outputs = $util.emptyArray; + OpDesc.prototype.attrs = $util.emptyArray; + OpDesc.prototype.is_target = false; + + OpDesc.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpDesc(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + message.type = reader.string(); + break; + case 1: + if (!(message.inputs && message.inputs.length)) + message.inputs = []; + message.inputs.push($root.paddle.framework.proto.OpDesc.Var.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.outputs && message.outputs.length)) + message.outputs = []; + message.outputs.push($root.paddle.framework.proto.OpDesc.Var.decode(reader, reader.uint32())); + break; + case 4: + if (!(message.attrs && message.attrs.length)) + message.attrs = []; + message.attrs.push($root.paddle.framework.proto.OpDesc.Attr.decode(reader, reader.uint32())); + break; + case 5: + message.is_target = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("type")) + throw $util.ProtocolError("missing required 'type'", { instance: message }); + return message; + }; + + OpDesc.Attr = (function() { + + function Attr(properties) { + this.ints = []; + this.floats = []; + this.strings = []; + this.bools = []; + this.blocks_idx = []; + this.longs = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Attr.prototype.name = ""; + Attr.prototype.type = 0; + Attr.prototype.i = 0; + Attr.prototype.f = 0; + Attr.prototype.s = ""; + Attr.prototype.ints = $util.emptyArray; + Attr.prototype.floats = $util.emptyArray; + Attr.prototype.strings = $util.emptyArray; + Attr.prototype.b = false; + Attr.prototype.bools = $util.emptyArray; + Attr.prototype.block_idx = 0; + Attr.prototype.l = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + Attr.prototype.blocks_idx = $util.emptyArray; + Attr.prototype.longs = $util.emptyArray; + + Attr.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpDesc.Attr(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.int32(); + break; + case 3: + message.i = reader.int32(); + break; + case 4: + message.f = reader.float(); + break; + case 5: + message.s = reader.string(); + break; + case 6: + if (!(message.ints && message.ints.length)) + message.ints = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.ints.push(reader.int32()); + } else + message.ints.push(reader.int32()); + break; + case 7: + if (!(message.floats && message.floats.length)) + message.floats = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.floats.push(reader.float()); + } else + message.floats.push(reader.float()); + break; + case 8: + if (!(message.strings && message.strings.length)) + message.strings = []; + message.strings.push(reader.string()); + break; + case 10: + message.b = reader.bool(); + break; + case 11: + if (!(message.bools && message.bools.length)) + message.bools = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.bools.push(reader.bool()); + } else + message.bools.push(reader.bool()); + break; + case 12: + message.block_idx = reader.int32(); + break; + case 13: + message.l = reader.int64(); + break; + case 14: + if (!(message.blocks_idx && message.blocks_idx.length)) + message.blocks_idx = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < 
end2) + message.blocks_idx.push(reader.int32()); + } else + message.blocks_idx.push(reader.int32()); + break; + case 15: + if (!(message.longs && message.longs.length)) + message.longs = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.longs.push(reader.int64()); + } else + message.longs.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("name")) + throw $util.ProtocolError("missing required 'name'", { instance: message }); + if (!message.hasOwnProperty("type")) + throw $util.ProtocolError("missing required 'type'", { instance: message }); + return message; + }; + + return Attr; + })(); + + OpDesc.Var = (function() { + + function Var(properties) { + this["arguments"] = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Var.prototype.parameter = ""; + Var.prototype["arguments"] = $util.emptyArray; + + Var.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpDesc.Var(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.parameter = reader.string(); + break; + case 2: + if (!(message["arguments"] && message["arguments"].length)) + message["arguments"] = []; + message["arguments"].push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("parameter")) + throw $util.ProtocolError("missing required 'parameter'", { instance: message }); + return message; + }; + + return Var; + })(); + + return OpDesc; + })(); + + proto.OpProto = (function() { + + function OpProto(properties) { + this.inputs = []; + this.outputs = []; + this.attrs = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OpProto.prototype.type = ""; + OpProto.prototype.inputs = $util.emptyArray; + OpProto.prototype.outputs = $util.emptyArray; + OpProto.prototype.attrs = $util.emptyArray; + OpProto.prototype.comment = ""; + + OpProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.string(); + break; + case 2: + if (!(message.inputs && message.inputs.length)) + message.inputs = []; + message.inputs.push($root.paddle.framework.proto.OpProto.Var.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.outputs && message.outputs.length)) + message.outputs = []; + message.outputs.push($root.paddle.framework.proto.OpProto.Var.decode(reader, reader.uint32())); + break; + case 4: + if (!(message.attrs && message.attrs.length)) + message.attrs = []; + message.attrs.push($root.paddle.framework.proto.OpProto.Attr.decode(reader, reader.uint32())); + break; + case 5: + message.comment = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("type")) + throw $util.ProtocolError("missing required 'type'", { instance: message }); + if (!message.hasOwnProperty("comment")) + throw $util.ProtocolError("missing required 'comment'", { instance: message }); + return message; + }; + + OpProto.Var = (function() { + + function Var(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Var.prototype.name = ""; + Var.prototype.comment = ""; + Var.prototype.duplicable = false; + Var.prototype.intermediate = false; + Var.prototype.dispensable = false; + + Var.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpProto.Var(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.comment = reader.string(); + break; + case 3: + message.duplicable = reader.bool(); + break; + case 4: + message.intermediate = reader.bool(); + break; + case 5: + message.dispensable = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("name")) + throw $util.ProtocolError("missing required 'name'", { instance: message }); + if (!message.hasOwnProperty("comment")) + throw $util.ProtocolError("missing required 'comment'", { instance: message }); + return message; + }; + + return Var; + })(); + + OpProto.Attr = (function() { + + function Attr(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Attr.prototype.name = ""; + Attr.prototype.type = 0; + Attr.prototype.comment = ""; + Attr.prototype.generated = false; + + Attr.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpProto.Attr(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.int32(); + break; + case 3: + message.comment = reader.string(); + break; + case 4: + message.generated = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("name")) + throw $util.ProtocolError("missing required 'name'", { instance: message }); + if (!message.hasOwnProperty("type")) + throw $util.ProtocolError("missing required 'type'", { instance: message }); + if (!message.hasOwnProperty("comment")) + throw $util.ProtocolError("missing required 'comment'", { instance: message }); + return message; + }; + + return Attr; + })(); + + return OpProto; + })(); + + proto.VarType = (function() { + + function VarType(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + VarType.prototype.type = 0; + VarType.prototype.selected_rows = null; + VarType.prototype.lod_tensor = null; + VarType.prototype.tensor_array = null; + VarType.prototype.reader = null; + VarType.prototype.tuple = null; + + VarType.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.VarType(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type = reader.int32(); + break; + case 2: + message.selected_rows = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 3: + message.lod_tensor = $root.paddle.framework.proto.VarType.LoDTensorDesc.decode(reader, reader.uint32()); + break; + case 4: + message.tensor_array = $root.paddle.framework.proto.VarType.LoDTensorArrayDesc.decode(reader, reader.uint32()); + break; + case 5: + message.reader = $root.paddle.framework.proto.VarType.ReaderDesc.decode(reader, reader.uint32()); + break; + case 7: + message.tuple = $root.paddle.framework.proto.VarType.Tuple.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("type")) + throw $util.ProtocolError("missing required 'type'", { instance: message }); + return message; + }; + + VarType.Type = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "BOOL"] = 0; + values[valuesById[1] = "INT16"] = 1; + values[valuesById[2] = "INT32"] = 2; + values[valuesById[3] = "INT64"] = 3; + values[valuesById[4] = "FP16"] = 4; + values[valuesById[5] = "FP32"] = 5; + values[valuesById[6] = "FP64"] = 6; + values[valuesById[19] = "SIZE_T"] = 19; + values[valuesById[20] = "UINT8"] = 20; + values[valuesById[21] = "INT8"] = 21; + values[valuesById[7] = "LOD_TENSOR"] = 7; + values[valuesById[8] = "SELECTED_ROWS"] = 8; + values[valuesById[9] = "FEED_MINIBATCH"] = 9; + values[valuesById[10] = "FETCH_LIST"] = 10; + values[valuesById[11] = "STEP_SCOPES"] = 11; + values[valuesById[12] = "LOD_RANK_TABLE"] = 12; + values[valuesById[13] = "LOD_TENSOR_ARRAY"] = 13; + values[valuesById[14] = "PLACE_LIST"] = 14; + values[valuesById[15] = "READER"] = 15; + values[valuesById[17] = "RAW"] = 17; + 
// ---- continuation from the previous hunk line: last entry and close of the
// VarType.Type enum table (bidirectional id<->name map) ----
values[valuesById[18] = "TUPLE"] = 18;
return values;
})();

// Decoder for paddle.framework.proto.VarType.TensorDesc (protobufjs-generated
// static code; fields: required data_type = 1, repeated int64 dims = 2).
VarType.TensorDesc = (function() {

    // Message constructor: copies any provided plain-object properties.
    function TensorDesc(properties) {
        this.dims = [];
        if (properties)
            for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
                if (properties[keys[i]] != null)
                    this[keys[i]] = properties[keys[i]];
    }

    // data_type holds a VarType.Type enum value; dims holds int64 dimensions
    // (presumably -1 marks an unknown extent — see paddle.TensorShape, which
    // renders -1 as '?'; TODO confirm against framework.proto).
    TensorDesc.prototype.data_type = 0;
    TensorDesc.prototype.dims = $util.emptyArray;

    // Decode one TensorDesc from `reader`; reads `length` bytes when given,
    // otherwise to the end of the buffer. Unknown fields are skipped.
    TensorDesc.decode = function decode(reader, length) {
        if (!(reader instanceof $Reader))
            reader = $Reader.create(reader);
        var end = length === undefined ? reader.len : reader.pos + length, message = new $root.paddle.framework.proto.VarType.TensorDesc();
        while (reader.pos < end) {
            var tag = reader.uint32();
            switch (tag >>> 3) {
                case 1:
                    message.data_type = reader.int32();
                    break;
                case 2:
                    if (!(message.dims && message.dims.length))
                        message.dims = [];
                    // Accept both packed (wire type 2: length-delimited run of
                    // varints) and unpacked encodings of the repeated field.
                    if ((tag & 7) === 2) {
                        var end2 = reader.uint32() + reader.pos;
                        while (reader.pos < end2)
                            message.dims.push(reader.int64());
                    } else
                        message.dims.push(reader.int64());
                    break;
                default:
                    reader.skipType(tag & 7);
                    break;
            }
        }
        // data_type is `required` in the proto2 schema.
        if (!message.hasOwnProperty("data_type"))
            throw $util.ProtocolError("missing required 'data_type'", { instance: message });
        return message;
    };

    return TensorDesc;
})();

// ---- paddle.framework.proto.VarType.LoDTensorDesc: a TensorDesc plus its
// LoD (level-of-detail) nesting depth; decode continues on the next hunk line ----
VarType.LoDTensorDesc = (function() {

    function LoDTensorDesc(properties) {
        if (properties)
            for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i)
                if (properties[keys[i]] != null)
                    this[keys[i]] = properties[keys[i]];
    }

    LoDTensorDesc.prototype.tensor = null;
    LoDTensorDesc.prototype.lod_level = 0;

    LoDTensorDesc.decode = function decode(reader, length) {
        if (!(reader instanceof $Reader))
            reader = $Reader.create(reader);
        var end = length === undefined ?
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.VarType.LoDTensorDesc(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 2: + message.lod_level = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("tensor")) + throw $util.ProtocolError("missing required 'tensor'", { instance: message }); + return message; + }; + + return LoDTensorDesc; + })(); + + VarType.LoDTensorArrayDesc = (function() { + + function LoDTensorArrayDesc(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + LoDTensorArrayDesc.prototype.tensor = null; + LoDTensorArrayDesc.prototype.lod_level = 0; + + LoDTensorArrayDesc.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.VarType.LoDTensorArrayDesc(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor = $root.paddle.framework.proto.VarType.TensorDesc.decode(reader, reader.uint32()); + break; + case 2: + message.lod_level = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("tensor")) + throw $util.ProtocolError("missing required 'tensor'", { instance: message }); + return message; + }; + + return LoDTensorArrayDesc; + })(); + + VarType.ReaderDesc = (function() { + + function ReaderDesc(properties) { + this.lod_tensor = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ReaderDesc.prototype.lod_tensor = $util.emptyArray; + + ReaderDesc.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.VarType.ReaderDesc(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.lod_tensor && message.lod_tensor.length)) + message.lod_tensor = []; + message.lod_tensor.push($root.paddle.framework.proto.VarType.LoDTensorDesc.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ReaderDesc; + })(); + + VarType.Tuple = (function() { + + function Tuple(properties) { + this.element_type = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Tuple.prototype.element_type = $util.emptyArray; + + Tuple.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.paddle.framework.proto.VarType.Tuple(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.element_type && message.element_type.length)) + message.element_type = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.element_type.push(reader.int32()); + } else + message.element_type.push(reader.int32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return Tuple; + })(); + + return VarType; + })(); + + proto.VarDesc = (function() { + + function VarDesc(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + VarDesc.prototype.name = ""; + VarDesc.prototype.type = null; + VarDesc.prototype.persistable = false; + VarDesc.prototype.need_check_feed = false; + + VarDesc.decode 
= function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.paddle.framework.proto.VarDesc(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = $root.paddle.framework.proto.VarType.decode(reader, reader.uint32()); + break; + case 3: + message.persistable = reader.bool(); + break; + case 4: + message.need_check_feed = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("name")) + throw $util.ProtocolError("missing required 'name'", { instance: message }); + if (!message.hasOwnProperty("type")) + throw $util.ProtocolError("missing required 'type'", { instance: message }); + return message; + }; + + return VarDesc; + })(); + + proto.BlockDesc = (function() { + + function BlockDesc(properties) { + this.vars = []; + this.ops = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BlockDesc.prototype.idx = 0; + BlockDesc.prototype.parent_idx = 0; + BlockDesc.prototype.vars = $util.emptyArray; + BlockDesc.prototype.ops = $util.emptyArray; + BlockDesc.prototype.forward_block_idx = -1; + + BlockDesc.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.BlockDesc(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.idx = reader.int32(); + break; + case 2: + message.parent_idx = reader.int32(); + break; + case 3: + if (!(message.vars && message.vars.length)) + message.vars = []; + message.vars.push($root.paddle.framework.proto.VarDesc.decode(reader, reader.uint32())); + break; + case 4: + if (!(message.ops && message.ops.length)) + message.ops = []; + message.ops.push($root.paddle.framework.proto.OpDesc.decode(reader, reader.uint32())); + break; + case 5: + message.forward_block_idx = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("idx")) + throw $util.ProtocolError("missing required 'idx'", { instance: message }); + if (!message.hasOwnProperty("parent_idx")) + throw $util.ProtocolError("missing required 'parent_idx'", { instance: message }); + return message; + }; + + return BlockDesc; + })(); + + proto.CompatibleInfo = (function() { + + function CompatibleInfo(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CompatibleInfo.prototype.version = ""; + CompatibleInfo.prototype.type = 0; + + CompatibleInfo.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.CompatibleInfo(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.string(); + break; + case 2: + message.type = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("version")) + throw $util.ProtocolError("missing required 'version'", { instance: message }); + if (!message.hasOwnProperty("type")) + throw $util.ProtocolError("missing required 'type'", { instance: message }); + return message; + }; + + CompatibleInfo.Type = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "COMPATIBLE"] = 0; + values[valuesById[1] = "DEFINITELY_NOT"] = 1; + values[valuesById[2] = "POSSIBLE"] = 2; + values[valuesById[3] = "BUG_FIX"] = 3; + values[valuesById[4] = "PRECISION_CHANGE"] = 4; + return values; + })(); + + return CompatibleInfo; + })(); + + proto.OpCompatibleMap = (function() { + + function OpCompatibleMap(properties) { + this.pair = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OpCompatibleMap.prototype.pair = $util.emptyArray; + OpCompatibleMap.prototype.default_required_version = ""; + + OpCompatibleMap.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpCompatibleMap(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.pair && message.pair.length)) + message.pair = []; + message.pair.push($root.paddle.framework.proto.OpCompatibleMap.OpCompatiblePair.decode(reader, reader.uint32())); + break; + case 2: + message.default_required_version = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OpCompatibleMap.OpCompatiblePair = (function() { + + function OpCompatiblePair(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OpCompatiblePair.prototype.op_name = ""; + OpCompatiblePair.prototype.compatible_info = null; + + OpCompatiblePair.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.OpCompatibleMap.OpCompatiblePair(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.op_name = reader.string(); + break; + case 2: + message.compatible_info = $root.paddle.framework.proto.CompatibleInfo.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + if (!message.hasOwnProperty("op_name")) + throw $util.ProtocolError("missing required 'op_name'", { instance: message }); + if (!message.hasOwnProperty("compatible_info")) + throw $util.ProtocolError("missing required 'compatible_info'", { instance: message }); + return message; + }; + + return OpCompatiblePair; + })(); + + return OpCompatibleMap; + })(); + + proto.ProgramDesc = (function() { + + function ProgramDesc(properties) { + this.blocks = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ProgramDesc.prototype.blocks = $util.emptyArray; + ProgramDesc.prototype.version = null; + ProgramDesc.prototype.op_compatible_map = null; + + ProgramDesc.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.paddle.framework.proto.ProgramDesc(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.blocks && message.blocks.length)) + message.blocks = []; + message.blocks.push($root.paddle.framework.proto.BlockDesc.decode(reader, reader.uint32())); + break; + case 4: + message.version = $root.paddle.framework.proto.Version.decode(reader, reader.uint32()); + break; + case 3: + message.op_compatible_map = $root.paddle.framework.proto.OpCompatibleMap.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + return ProgramDesc; + })(); + + return proto; + })(); + + return framework; + })(); + + return paddle; + })(); + + return $root; +})(protobuf); diff --git a/frontend/packages/core/public/netron/paddle.js b/frontend/packages/core/public/netron/paddle.js new file mode 100644 index 00000000..925bbee4 --- /dev/null +++ b/frontend/packages/core/public/netron/paddle.js @@ -0,0 +1,522 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var paddle = paddle || {}; +var protobuf = protobuf || require('protobufjs'); +var base = base || require('./base'); + +paddle.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (identifier == '__model__' || extension == 'paddle') { + return true; + } + return false; + } + + open(context, host) { + return host.require('./paddle-proto').then(() => { + let desc = null; + const identifier = context.identifier; + try { + paddle.proto = protobuf.roots.paddle.paddle.framework.proto; + desc = paddle.proto.ProgramDesc.decode(context.buffer); + } + catch (error) { + throw new paddle.Error("File format is not paddle.ProgramDesc (" + error.message + ") in '" + identifier + "'."); + } + return paddle.Metadata.open(host).then((metadata) => { + try 
// ---- continuation from the previous hunk line: try-block of the
// paddle.Metadata.open callback inside paddle.ModelFactory.open ----
{
                    return new paddle.Model(metadata, desc);
                }
                catch (error) {
                    // Report to the host, then surface as a paddle.Error so the
                    // viewer shows a format-specific message.
                    host.exception(error, false);
                    throw new paddle.Error(error.message);
                }
            });
        });
    }
};

// Top-level model adapter: one paddle.Graph per BlockDesc in the ProgramDesc.
paddle.Model = class {

    // metadata: paddle.Metadata instance; desc: decoded proto.ProgramDesc.
    constructor(metadata, desc) {
        this._graphs = [];
        for (const block of desc.blocks) {
            this._graphs.push(new paddle.Graph(metadata, block));
        }
    }

    get graphs() {
        return this._graphs;
    }

    get format() {
        return 'PaddlePaddle';
    }
};

// Adapter for one BlockDesc; class body continues on the next hunk line.
paddle.Graph = class {

    constructor(metadata, block) {
        this._nodes = [];
        this._inputs = [];
        this._outputs = [];

        // Split the block's variables: persistable non-feed/fetch vars become
        // initializer tensors (weights); everything else contributes a type
        // for argument annotation.
        const initializers = {};
        const types = {};
        for (const variable of block.vars) {
            if (variable.persistable && variable.type &&
                variable.type.type != paddle.proto.VarType.Type.FETCH_LIST &&
                variable.type.type != paddle.proto.VarType.Type.FEED_MINIBATCH) {
                initializers[variable.name] = new paddle.Tensor(variable);
            }
            else {
                types[variable.name] = paddle.Graph._type(variable);
            }

        }

        // Rename reused variable names so each write gets a unique argument id
        // (SSA-like pass; continues on the next hunk line).
        const scope = {};
        for (let i = 0; i < block.ops.length; i++) {
            for (const input of block.ops[i].inputs) {
                input.arguments = input.arguments.map((argument) => scope[argument] ?
scope[argument] : argument); + } + for (const output of block.ops[i].outputs) { + output.arguments = output.arguments.map((argument) => { + if (scope[argument]) { + const next = argument + '\n' + i.toString(); // custom argument id + scope[argument] = next; + return next; + } + scope[argument] = argument; + return argument; + }); + } + } + + let lastNode = null; + let lastOutput = null; + for (const op of block.ops) { + if (op.type == 'feed') { + const inputName = op.attrs.filter((attr) => attr.name == 'col')[0].i.toString(); + this._inputs.push(new paddle.Parameter(inputName, op.outputs[0].arguments.map((id) => { + return new paddle.Argument(id, types[id], null, null); + }))); + } + else if (op.type == 'fetch') { + const outputName = op.attrs.filter((attr) => attr.name == 'col')[0].i.toString(); + this._outputs.push(new paddle.Parameter(outputName, op.inputs[0].arguments.map((id) => { + return new paddle.Argument(id, types[id], null, null); + }))); + } + else { + const node = new paddle.Node(metadata, op, initializers, types); + if (op.inputs.length == 1 && op.inputs[0].arguments.length == 1 && + op.outputs.length >= 1 && op.outputs[0].arguments.length == 1 && + op.inputs[0].arguments[0].split('\n').shift() == op.outputs[0].arguments[0].split('\n').shift() && + lastNode && + lastOutput == op.inputs[0].arguments[0].split('\n').shift()) { + lastNode.chain.push(node); + } + else { + this._nodes.push(node); + lastNode = null; + lastOutput = null; + if (op.outputs.length == 1 && op.outputs[0].arguments.length == 1) { + lastNode = node; + lastOutput = op.outputs[0].arguments[0].split('\n').shift(); + } + } + } + } + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + static _type(variable) { + switch (variable.type.type) { + case paddle.proto.VarType.Type.LOD_TENSOR: + if (variable.type.lod_tensor) { + return new paddle.TensorType(variable.type.lod_tensor.tensor); + } + break; + 
default: + break; + } + return null; + } +}; + + +paddle.Parameter = class { + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +paddle.Argument = class { + + constructor(name, type, description, initializer) { + if (typeof name !== 'string') { + throw new paddle.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._description = description || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._type) { + return this._type; + } + if (this._initializer) { + return this._initializer.type; + } + return null; + } + + get description() { + return this._description; + } + + get initializer() { + return this._initializer; + } +}; + +paddle.Node = class { + + constructor(metadata, op, initializers, types) { + this._metadata = metadata; + this._type = op.type; + this._attributes = []; + this._inputs = []; + this._outputs = []; + this._chain = []; + for (const attr of op.attrs) { + const schema = metadata.attribute(this._type, this._name); + this._attributes.push(new paddle.Attribute(schema, attr)); + } + for (const input of op.inputs) { + if (input.arguments.length > 0) { + const inputArguments = input.arguments.map((argument) => new paddle.Argument(argument, types[argument.split('\n').shift()], null, initializers[argument])); + this._inputs.push(new paddle.Parameter(input.parameter, inputArguments)); + } + } + for (const output of op.outputs) { + if (output.arguments.length > 0) { + const outputArguments = output.arguments.map((argument) => new paddle.Argument(argument, types[argument.split('\n').shift()], null, null)); + this._outputs.push(new paddle.Parameter(output.parameter, outputArguments)); + } + } + this._update(this._inputs, 'X'); + 
this._update(this._inputs, 'Input'); + this._update(this._outputs, 'Y'); + this._update(this._outputs, 'Out'); + } + + get type() { + return this._type; + } + + get name() { + return ''; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } + + _update(list, name) { + let item = null; + for (let i = 0; i < list.length; i++) { + if (list[i].name == name) { + item = list[i]; + list.splice(i, 1); + break; + } + } + if (item) { + list.splice(0, 0, item); + } + } +}; + +paddle.Attribute = class { + + constructor(schema, attr) { + this._name = attr.name; + this._value = '?'; + switch (attr.type) { + case paddle.proto.AttrType.STRING: + this._type = 'string'; + this._value = attr.s; + break; + case paddle.proto.AttrType.STRINGS: + this._type = 'string[]'; + this._value = attr.strings; + break; + case paddle.proto.AttrType.BOOLEAN: + this._type = 'boolean'; + this._value = attr.b; + break; + case paddle.proto.AttrType.BOOLEANS: + this._type = 'boolean[]'; + this._value = attr.bools; + break; + case paddle.proto.AttrType.FLOAT: + this._type = 'float32'; + this._value = attr.f; + break; + case paddle.proto.AttrType.FLOATS: + this._type = 'float[]'; + this._value = attr.floats; + break; + case paddle.proto.AttrType.INT: + this._type = 'int32'; + this._value = attr.i; + break; + case paddle.proto.AttrType.INTS: + this._type = 'int32[]'; + this._value = attr.ints; + break; + case paddle.proto.AttrType.LONG: + this._type = 'int64'; + break; + case paddle.proto.AttrType.LONGS: + this._type = 'int64[]'; + break; + default: + break; + } + switch (this._name) { + case 'use_mkldnn': + case 'use_cudnn': + case 'op_callstack': + case 'op_role': + case 'op_role_var': + case 'op_namescope': + case 'is_test': + this._visible = false; + break; + } + if (schema) { + if 
(Object.prototype.hasOwnProperty.call(schema, 'default')) { + const defaultValue = schema.default; + const value = this._value; + if (defaultValue == value) { + this._visible = false; + } + else if (Array.isArray(value) && Array.isArray(defaultValue) && value.length == defaultValue.length) { + if (value.every((item, index) => { return item == defaultValue[index]; })) { + this._visible = false; + } + } + + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +paddle.Tensor = class { + + constructor(variable) { + this._type = paddle.Graph._type(variable); + } + + get type() { + return this._type; + } + + get state() { + return 'Tensor data not implemented.'; + } + + get value() { + return null; + } + + toString() { + return ''; + } +}; + +paddle.TensorType = class { + + constructor(desc) { + switch (desc.data_type) { + case paddle.proto.VarType.Type.INT32: + this._dataType = 'int32'; + break; + case paddle.proto.VarType.Type.INT64: + this._dataType = 'int64'; + break; + case paddle.proto.VarType.Type.FP32: + this._dataType = 'float32'; + break; + case paddle.proto.VarType.Type.FP64: + this._dataType = 'float64'; + break; + default: + this._dataType = '?'; + break; + } + this._shape = new paddle.TensorShape(desc.dims); + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + get denotation() { + return this._denotation; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +paddle.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions.map((dimension) => { + return dimension != -1 ? dimension : '?'; + }); + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return (this._dimensions && this._dimensions.length) ? 
('[' + this._dimensions.join(',') + ']') : ''; + } +}; + +paddle.Metadata = class { + + static open(host) { + if (paddle.Metadata._metadata) { + return Promise.resolve(paddle.Metadata._metadata); + } + return host.request(null, 'paddle-metadata.json', 'utf-8').then((data) => { + paddle.Metadata._metadata = new paddle.Metadata(data); + return paddle.Metadata._metadata; + }).catch(() => { + paddle.Metadata._metadata = new paddle.Metadata(null); + return paddle.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +paddle.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading PaddlePaddle model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = paddle.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/pickle.js b/frontend/packages/core/public/netron/pickle.js new file mode 100644 index 00000000..4333dbe7 --- /dev/null +++ b/frontend/packages/core/public/netron/pickle.js @@ -0,0 +1,563 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var pickle = pickle || {}; + +pickle.Unpickler = class { + + constructor(buffer) { + this._reader = new pickle.Reader(buffer, 0); + } + + load(function_call, persistent_load) { + let reader = this._reader; + let marker = []; + let 
stack = []; + let memo = new Map(); + while (reader.position < reader.length) { + const opcode = reader.byte(); + switch (opcode) { + case pickle.OpCode.PROTO: { + const version = reader.byte(); + if (version > 4) { + throw new pickle.Error("Unsupported protocol version '" + version + "'."); + } + break; + } + case pickle.OpCode.GLOBAL: + stack.push([ reader.line(), reader.line() ].join('.')); + break; + case pickle.OpCode.STACK_GLOBAL: + stack.push([ stack.pop(), stack.pop() ].reverse().join('.')); + break; + case pickle.OpCode.PUT: { + const index = parseInt(reader.line(), 10); + memo.set(index, stack[stack.length - 1]); + break; + } + case pickle.OpCode.OBJ: { + const items = stack; + stack = marker.pop(); + stack.push(function_call(items.pop(), items)); + break; + } + case pickle.OpCode.GET: { + const index = parseInt(reader.line(), 10); + stack.push(memo.get(index)); + break; + } + case pickle.OpCode.POP: + stack.pop(); + break; + case pickle.OpCode.POP_MARK: + stack = marker.pop(); + break; + case pickle.OpCode.DUP: + stack.push(stack[stack.length-1]); + break; + case pickle.OpCode.PERSID: + stack.push(persistent_load(reader.line())); + break; + case pickle.OpCode.BINPERSID: + stack.push(persistent_load(stack.pop())); + break; + case pickle.OpCode.REDUCE: { + const items = stack.pop(); + const type = stack.pop(); + stack.push(function_call(type, items)); + break; + } + case pickle.OpCode.NEWOBJ: { + const items = stack.pop(); + const type = stack.pop(); + stack.push(function_call(type, items)); + break; + } + case pickle.OpCode.BINGET: + stack.push(memo.get(reader.byte())); + break; + case pickle.OpCode.LONG_BINGET: + stack.push(memo.get(reader.uint32())); + break; + case pickle.OpCode.BINPUT: + memo.set(reader.byte(), stack[stack.length - 1]); + break; + case pickle.OpCode.LONG_BINPUT: + memo.set(reader.uint32(), stack[stack.length - 1]); + break; + case pickle.OpCode.BININT: + stack.push(reader.int32()); + break; + case pickle.OpCode.BININT1: + 
stack.push(reader.byte()); + break; + case pickle.OpCode.LONG: + stack.push(parseInt(reader.line(), 10)); + break; + case pickle.OpCode.BININT2: + stack.push(reader.uint16()); + break; + case pickle.OpCode.BINBYTES: + stack.push(reader.bytes(reader.int32())); + break; + case pickle.OpCode.SHORT_BINBYTES: + stack.push(reader.bytes(reader.byte())); + break; + case pickle.OpCode.FLOAT: + stack.push(parseFloat(reader.line())); + break; + case pickle.OpCode.BINFLOAT: + stack.push(reader.float64()); + break; + case pickle.OpCode.INT: { + const value = reader.line(); + if (value == '01') { + stack.push(true); + } + else if (value == '00') { + stack.push(false); + } + else { + stack.push(parseInt(value, 10)); + } + break; + } + case pickle.OpCode.EMPTY_LIST: + stack.push([]); + break; + case pickle.OpCode.EMPTY_TUPLE: + stack.push([]); + break; + case pickle.OpCode.EMPTY_SET: + stack.push([]); + break; + case pickle.OpCode.ADDITEMS: { + const items = stack; + stack = marker.pop(); + let obj = stack[stack.length - 1]; + for (let i = 0; i < items.length; i++) { + obj.push(items[i]); + } + break; + } + case pickle.OpCode.DICT: { + const items = stack; + stack = marker.pop(); + let dict = {}; + for (let i = 0; i < items.length; i += 2) { + dict[items[i]] = items[i + 1]; + } + stack.push(dict); + break; + } + case pickle.OpCode.LIST: { + const items = stack; + stack = marker.pop(); + stack.push(items); + break; + } + case pickle.OpCode.TUPLE: { + const items = stack; + stack = marker.pop(); + stack.push(items); + break; + } + case pickle.OpCode.SETITEM: { + const value = stack.pop(); + const key = stack.pop(); + let obj = stack[stack.length - 1]; + if (obj.__setitem__) { + obj.__setitem__(key, value); + } + else { + obj[key] = value; + } + break; + } + case pickle.OpCode.SETITEMS: { + const items = stack; + stack = marker.pop(); + let obj = stack[stack.length - 1]; + for (let i = 0; i < items.length; i += 2) { + if (obj.__setitem__) { + obj.__setitem__(items[i], items[i + 1]); 
+ } + else { + obj[items[i]] = items[i + 1]; + } + } + break; + } + case pickle.OpCode.EMPTY_DICT: + stack.push({}); + break; + case pickle.OpCode.APPEND: { + const append = stack.pop(); + stack[stack.length-1].push(append); + break; + } + case pickle.OpCode.APPENDS: { + const appends = stack; + stack = marker.pop(); + let list = stack[stack.length - 1]; + list.push.apply(list, appends); + break; + } + case pickle.OpCode.STRING: { + const str = reader.line(); + stack.push(str.substr(1, str.length - 2)); + break; + } + case pickle.OpCode.BINSTRING: + stack.push(reader.string(reader.uint32())); + break; + case pickle.OpCode.SHORT_BINSTRING: + stack.push(reader.string(reader.byte())); + break; + case pickle.OpCode.UNICODE: + stack.push(reader.line()); + break; + case pickle.OpCode.BINUNICODE: + stack.push(reader.string(reader.uint32(), 'utf-8')); + break; + case pickle.OpCode.SHORT_BINUNICODE: + stack.push(reader.string(reader.byte(), 'utf-8')); + break; + case pickle.OpCode.BUILD: { + const state = stack.pop(); + let obj = stack.pop(); + if (obj.__setstate__) { + if (obj.__setstate__.__call__) { + obj.__setstate__.__call__([ obj, state ]); + } + else { + obj.__setstate__(state); + } + } + else { + for (const p in state) { + obj[p] = state[p]; + } + } + if (obj.__read__) { + obj = obj.__read__(this); + } + stack.push(obj); + break; + } + case pickle.OpCode.MARK: + marker.push(stack); + stack = []; + break; + case pickle.OpCode.NEWTRUE: + stack.push(true); + break; + case pickle.OpCode.NEWFALSE: + stack.push(false); + break; + case pickle.OpCode.LONG1: { + const data = reader.bytes(reader.byte()); + let number = 0; + switch (data.length) { + case 0: number = 0; break; + case 1: number = data[0]; break; + case 2: number = data[1] << 8 | data[0]; break; + case 3: number = data[2] << 16 | data[1] << 8 | data[0]; break; + case 4: number = data[3] << 24 | data[2] << 16 | data[1] << 8 | data[0]; break; + default: number = Array.prototype.slice.call(data, 0); break; + } + 
stack.push(number); + break; + } + case pickle.OpCode.LONG4: + // TODO decode LONG4 + stack.push(reader.bytes(reader.uint32())); + break; + case pickle.OpCode.TUPLE1: + stack.push([ stack.pop() ]); + break; + case pickle.OpCode.TUPLE2: { + const b = stack.pop(); + const a = stack.pop(); + stack.push([ a, b ]); + break; + } + case pickle.OpCode.TUPLE3: { + const c = stack.pop(); + const b = stack.pop(); + const a = stack.pop(); + stack.push([ a, b, c ]); + break; + } + case pickle.OpCode.MEMOIZE: + memo.set(memo.size, stack[stack.length - 1]); + break; + case pickle.OpCode.FRAME: + reader.bytes(8); + break; + case pickle.OpCode.NONE: + stack.push(null); + break; + case pickle.OpCode.STOP: + return stack.pop(); + default: + throw new pickle.Error("Unknown opcode '" + opcode + "'."); + } + } + throw new pickle.Error('Unexpected end of file.'); + } + + read(size) { + return this._reader.bytes(size); + } + + unescape(token, size) { + const length = token.length; + const a = new Uint8Array(length); + if (size && size == length) { + for (let p = 0; p < size; p++) { + a[p] = token.charCodeAt(p); + } + return a; + } + let i = 0; + let o = 0; + while (i < length) { + let c = token.charCodeAt(i++); + if (c !== 0x5C || i >= length) { + a[o++] = c; + } + else { + c = token.charCodeAt(i++); + switch (c) { + case 0x27: a[o++] = 0x27; break; // ' + case 0x5C: a[o++] = 0x5C; break; // \\ + case 0x22: a[o++] = 0x22; break; // " + case 0x72: a[o++] = 0x0D; break; // \r + case 0x6E: a[o++] = 0x0A; break; // \n + case 0x74: a[o++] = 0x09; break; // \t + case 0x62: a[o++] = 0x08; break; // \b + case 0x58: // x + case 0x78: { // X + const xsi = i - 1; + const xso = o; + for (let xi = 0; xi < 2; xi++) { + if (i >= length) { + i = xsi; + o = xso; + a[o] = 0x5c; + break; + } + let xd = token.charCodeAt(i++); + xd = xd >= 65 && xd <= 70 ? xd - 55 : xd >= 97 && xd <= 102 ? xd - 87 : xd >= 48 && xd <= 57 ? 
xd - 48 : -1; + if (xd === -1) { + i = xsi; + o = xso; + a[o] = 0x5c; + break; + } + a[o] = a[o] << 4 | xd; + } + o++; + break; + } + default: + if (c < 48 || c > 57) { // 0-9 + a[o++] = 0x5c; + a[o++] = c; + } + else { + i--; + let osi = i; + let oso = o; + for (let oi = 0; oi < 3; oi++) { + if (i >= length) { + i = osi; + o = oso; + a[o] = 0x5c; + break; + } + let od = token.charCodeAt(i++); + if (od < 48 || od > 57) { + i = osi; + o = oso; + a[o] = 0x5c; + break; + } + a[o] = a[o] << 3 | od - 48; + } + o++; + } + break; + } + } + } + return a.slice(0, o); + } +}; + +// https://svn.python.org/projects/python/trunk/Lib/pickletools.py +// https://github.com/python/cpython/blob/master/Lib/pickle.py +pickle.OpCode = { + MARK: 40, // '(' + EMPTY_TUPLE: 41, // ')' + STOP: 46, // '.' + POP: 48, // '0' + POP_MARK: 49, // '1' + DUP: 50, // '2' + BINBYTES: 66, // 'B' (Protocol 3) + SHORT_BINBYTES: 67, // 'C' (Protocol 3) + FLOAT: 70, // 'F' + BINFLOAT: 71, // 'G' + INT: 73, // 'I' + BININT: 74, // 'J' + BININT1: 75, // 'K' + LONG: 76, // 'L' + BININT2: 77, // 'M' + NONE: 78, // 'N' + PERSID: 80, // 'P' + BINPERSID: 81, // 'Q' + REDUCE: 82, // 'R' + STRING: 83, // 'S' + BINSTRING: 84, // 'T' + SHORT_BINSTRING: 85, // 'U' + UNICODE: 86, // 'V' + BINUNICODE: 88, // 'X' + EMPTY_LIST: 93, // ']' + APPEND: 97, // 'a' + BUILD: 98, // 'b' + GLOBAL: 99, // 'c' + DICT: 100, // 'd' + APPENDS: 101, // 'e' + GET: 103, // 'g' + BINGET: 104, // 'h' + LONG_BINGET: 106, // 'j' + LIST: 108, // 'l' + OBJ: 111, // 'o' + PUT: 112, // 'p' + BINPUT: 113, // 'q' + LONG_BINPUT: 114, // 'r' + SETITEM: 115, // 's' + TUPLE: 116, // 't' + SETITEMS: 117, // 'u' + EMPTY_DICT: 125, // '}' + PROTO: 128, + NEWOBJ: 129, + TUPLE1: 133, // '\x85' + TUPLE2: 134, // '\x86' + TUPLE3: 135, // '\x87' + NEWTRUE: 136, // '\x88' + NEWFALSE: 137, // '\x89' + LONG1: 138, // '\x8a' + LONG4: 139, // '\x8b' + SHORT_BINUNICODE: 140, // '\x8c' (Protocol 4) + BINUNICODE8: 141, // '\x8d' (Protocol 4) + BINBYTES8: 142, // 
'\x8e' (Protocol 4) + EMPTY_SET: 143, // '\x8f' (Protocol 4) + ADDITEMS: 144, // '\x90' (Protocol 4) + FROZENSET: 145, // '\x91' (Protocol 4) + NEWOBJ_EX: 146, // '\x92' (Protocol 4) + STACK_GLOBAL: 147, // '\x93' (Protocol 4) + MEMOIZE: 148, // '\x94' (Protocol 4) + FRAME: 149 // '\x95' (Protocol 4) +}; + +pickle.Reader = class { + + constructor(buffer) { + if (buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + } + pickle.Reader._utf8Decoder = pickle.Reader._utf8Decoder || new TextDecoder('utf-8'); + pickle.Reader._asciiDecoder = pickle.Reader._asciiDecoder || new TextDecoder('ascii'); + } + + get length() { + return this._buffer.byteLength; + } + + get position() { + return this._position; + } + + byte() { + const position = this._position; + this.skip(1); + return this._dataView.getUint8(position); + } + + bytes(length) { + const position = this._position; + this.skip(length); + return this._buffer.subarray(position, this._position); + } + + uint16() { + const position = this.position; + this.skip(2); + return this._dataView.getUint16(position, true); + } + + int32() { + const position = this.position; + this.skip(4); + return this._dataView.getInt32(position, true); + } + + uint32() { + const position = this.position; + this.skip(4); + return this._dataView.getUint32(position, true); + } + + float32() { + const position = this.position; + this.skip(4); + return this._dataView.getFloat32(position, true); + } + + float64() { + const position = this.position; + this.skip(8); + return this._dataView.getFloat64(position, true); + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new pickle.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.'); + } + } + + string(size, encoding) { + const data = this.bytes(size); + return (encoding == 'utf-8') ? 
+ pickle.Reader._utf8Decoder.decode(data) : + pickle.Reader._asciiDecoder.decode(data); + } + + line() { + const index = this._buffer.indexOf(0x0A, this._position); + if (index == -1) { + throw new pickle.Error("Could not find end of line."); + } + const size = index - this._position; + const text = this.string(size, 'ascii'); + this.skip(1); + return text; + } +}; + + +pickle.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Unpickle Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Unpickler = pickle.Unpickler; +} diff --git a/frontend/packages/core/public/netron/python.js b/frontend/packages/core/public/netron/python.js new file mode 100644 index 00000000..00ae6677 --- /dev/null +++ b/frontend/packages/core/public/netron/python.js @@ -0,0 +1,1603 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental Python parser + +var python = python || {}; + +python.Parser = class { + + constructor(text, file) { + this._tokenizer = new python.Tokenizer(text, file); + if (!python.Parser._precedence) { + python.Parser._precedence = { + 'or': 2, 'and': 3, 'not' : 4, + 'in': 5, 'instanceof': 5, 'is': 5, '<': 5, '>': 5, '<=': 5, '>=': 5, '<>': 5, '==': 5, '!=': 5, + '|': 6, '^' : 7, '&' : 8, + '<<': 9, '>>': 9, '+': 10, '-': 10, '*': 11, '@': 11, '/': 11, '//': 11, '%': 11, + // '+': 12, '-': 12, + '~': 13, '**': 14 + }; + } + } + + parse() { + const node = this._node('program'); + node.body = []; + while (!this._tokenizer.match('eof')) { + const statement = this._parseStatement(); + if (statement) { + node.body.push(statement); + continue; + } + if (this._tokenizer.eat('\n') || this._tokenizer.eat(';') || this._tokenizer.peek().type == 'eof') { + continue; + } + if (this._tokenizer.eat('indent') && this._tokenizer.peek().type == 'eof') { + continue; + } + throw new python.Error('Unknown statement' + this._tokenizer.location()); 
+ } + return node; + } + + _parseSuite() { + const node = this._node('block'); + node.statements = []; + let statement = null; + if (this._tokenizer.eat('\n')) { + if (this._tokenizer.eat('indent')) { + while (!this._tokenizer.eat('eof') && !this._tokenizer.eat('dedent')) { + if (this._tokenizer.eat(';')) { + continue; + } + statement = this._parseStatement(); + if (statement) { + node.statements.push(statement); + continue; + } + if (this._tokenizer.eat('\n')) { + continue; + } + if (this._tokenizer.match('dedent') || this._tokenizer.match('eof')) { + continue; + } + throw new python.Error('Empty statement' + this._tokenizer.location()); + } + } + } + else if (!this._tokenizer.eat('eof')) { + while (!this._tokenizer.match('\n') && !this._tokenizer.match('eof') && !this._tokenizer.match('dedent')) { + if (this._tokenizer.eat(';')) { + continue; + } + statement = this._parseStatement(); + if (statement) { + node.statements.push(statement); + continue; + } + throw new python.Error('Empty statement' + this._tokenizer.location()); + } + this._tokenizer.eat('\n'); + } + + return node; + } + + _parseStatement() { + + let node = this._node(); + + node = this._eat('id', 'break'); + if (node) { + return node; + } + node = this._eat('id', 'continue'); + if (node) { + return node; + } + node = this._eat('id', 'return'); + if (node) { + node.expression = this._parseExpression(-1, [], true); + return node; + } + node = this._eat('id', 'raise'); + if (node) { + node.exception = this._parseExpression(-1, [ 'from' ]); + if (this._tokenizer.eat('id', 'from')) { + node.from = this._parseExpression(); + } + else if (this._tokenizer.eat(',')) { + node.exception = [ node.exception ]; + node.exception.push(this._parseExpression()); + if (this._tokenizer.eat(',')) { + node.exception.push(this._parseExpression()); + } + } + return node; + } + node = this._eat('id', 'assert'); + if (node) { + node.condition = this._parseExpression(); + while (this._tokenizer.eat(',')) { + node.condition = 
{ type: 'list', value: [ node.condition ] }; + node.condition.value.push(this._parseExpression()); + } + return node; + } + node = this._eat('id', 'exec'); + if (node) { + node.variable = this._parseExpression(-1, [ 'in' ]); + if (this._tokenizer.eat('in')) { + do { + node.target = node.target || []; + node.target.push(this._parseExpression(-1, [ 'in' ], false)); + } + while (this._tokenizer.eat(',')); + } + return node; + } + + node = this._eat('id', 'global'); + if (node) { + node.variable = []; + do { + node.variable.push(this._parseName()); + } + while (this._tokenizer.eat(',')); + return node; + } + node = this._eat('id', 'nonlocal'); + if (node) { + node.variable = []; + do { + node.variable.push(this._parseName()); + } + while (this._tokenizer.eat(',')); + return node; + } + node = this._eat('id', 'import'); + if (node) { + node.modules = []; + do { + const module = this._node('module'); + module.name = this._parseExpression(-1, [], false); + if (this._tokenizer.eat('id', 'as')) { + module.as = this._parseExpression(-1, [], false); + } + node.modules.push(module); + } + while (this._tokenizer.eat(',')); + return node; + } + node = this._eat('id', 'from'); + if (node) { + const dots = this._tokenizer.peek(); + if (dots && Array.from(dots.type).every((c) => c == '.')) { + node.from = this._eat(dots.type); + node.from.expression = this._parseExpression(); + } + else { + node.from = this._parseExpression(); + } + this._tokenizer.expect('id', 'import'); + node.import = []; + const close = this._tokenizer.eat('('); + do { + const symbol = this._node(); + symbol.symbol = this._parseExpression(-1, [], false); + if (this._tokenizer.eat('id', 'as')) { + symbol.as = this._parseExpression(-1, [], false); + } + node.import.push(symbol); + } + while (this._tokenizer.eat(',')); + if (close) { + this._tokenizer.expect(')'); + } + return node; + } + node = this._eat('id', 'class'); + if (node) { + node.name = this._parseName().value; + if (this._tokenizer.peek().value === 
'(') { + node.base = this._parseArguments(); + } + this._tokenizer.expect(':'); + node.body = this._parseSuite(); + return node; + } + + const async = this._eat('id', 'async'); + if (async && + !this._tokenizer.match('id', 'def') && + !this._tokenizer.match('id', 'with') && + !this._tokenizer.match('id', 'for')) { + throw new python.Error("Expected 'def', 'with' or 'for'" + this._tokenizer.location()); + } + + node = this._eat('id', 'def'); + if (node) { + if (async) { + node.async = async; + } + node.name = this._parseName().value; + this._tokenizer.expect('('); + node.parameters = this._parseParameters(')'); + if (this._tokenizer.eat('->')) { + node.returnType = this._parseType(); + } + this._tokenizer.expect(':'); + node.body = this._parseSuite(); + return node; + } + node = this._eat('id', 'del'); + if (node) { + node.expression = this._parseExpression(-1, [], true); + return node; + } + node = this._eat('id', 'print'); + if (node) { + node.expression = this._parseExpression(-1, [], true); + return node; + } + node = this._eat('id', 'if'); + if (node) { + node.condition = this._parseExpression(); + this._tokenizer.expect(':'); + node.then = this._parseSuite(); + let current = node; + this._tokenizer.eat('\n'); + while (this._tokenizer.eat('id', 'elif')) { + current.else = this._node('if'); + current = current.else; + current.condition = this._parseExpression(); + this._tokenizer.expect(':'); + current.then = this._parseSuite(); + this._tokenizer.eat('\n'); + } + if (this._tokenizer.eat('id', 'else')) { + this._tokenizer.expect(':'); + current.else = this._parseSuite(); + } + return node; + } + node = this._eat('id', 'while'); + if (node) { + node.condition = this._parseExpression(); + this._tokenizer.expect(':'); + node.body = this._parseSuite(); + if (this._tokenizer.eat('id', 'else')) { + this._tokenizer.expect(':'); + node.else = this._parseSuite(); + } + return node; + } + node = this._eat('id', 'pass'); + if (node) { + return node; + } + node = 
this._eat('id', 'for'); + if (node) { + node.variable = []; + node.variable.push(this._parseExpression(-1, [ 'in' ])); + while (this._tokenizer.eat(',')) { + if (this._tokenizer.match('id', 'in')) { + node.variable.push({}); + break; + } + node.variable.push(this._parseExpression(-1, [ 'in' ])); + } + this._tokenizer.expect('id', 'in'); + node.target = []; + node.target.push(this._parseExpression()); + while (this._tokenizer.eat(',')) { + if (this._tokenizer.match(':')) { + node.target.push({}); + break; + } + node.target.push(this._parseExpression(-1, [ 'in' ])); + } + this._tokenizer.expect(':'); + node.body = this._parseSuite(); + if (this._tokenizer.eat('id', 'else')) { + this._tokenizer.expect(':'); + node.else = this._parseSuite(); + } + return node; + } + node = this._eat('id', 'with'); + if (node) { + if (async) { + node.async = async; + } + node.item = []; + do { + const item = this._node(); + item.type = 'with_item'; + item.expression = this._parseExpression(); + if (this._tokenizer.eat('id', 'as')) { + item.variable = this._parseExpression(); + } + node.item.push(item); + } + while (this._tokenizer.eat(',')); + this._tokenizer.expect(':'); + node.body = this._parseSuite(); + return node; + } + node = this._eat('id', 'try'); + if (node) { + this._tokenizer.expect(':'); + node.body = this._parseSuite(); + node.except = []; + while (this._tokenizer.match('id', 'except')) { + const except = this._node('except'); + this._tokenizer.expect('id', 'except'); + except.clause = []; + except.clause.push(this._parseExpression()); + while (this._tokenizer.eat(',')) { + if (this._tokenizer.match(':') || this._tokenizer.match('as')) { + except.clause.push({}); + break; + } + except.clause.push(this._parseExpression()); + } + if (this._tokenizer.eat('id', 'as')) { + except.variable = this._parseExpression(); + } + this._tokenizer.expect(':'); + except.body = this._parseSuite(); + node.except.push(except); + } + if (this._tokenizer.match('id', 'else')) { + node.else = 
this._node('else'); + this._tokenizer.expect('id', 'else'); + this._tokenizer.expect(':'); + node.else.body = this._parseSuite(); + } + if (this._tokenizer.match('id', 'finally')) { + node.finally = this._node('finally'); + this._tokenizer.expect('id', 'finally'); + this._tokenizer.expect(':'); + node.finally.body = this._parseSuite(); + } + return node; + } + + if (this._tokenizer.match('@')) { + node = this._node('decorator'); + this._tokenizer.expect('@'); + node.value = this._parseExpression(); + if (!node.value || (node.value.type !== 'call' && node.value.type !== 'id' && node.value.type !== '.')) { + throw new python.Error('Invalid decorator' + this._tokenizer.location()); + } + return node; + } + + const expression = this._parseExpression(-1, [], true); + if (expression) { + if (expression.type == 'id' && this._tokenizer.eat(':')) { + node = this._node('var'); + node.name = expression.value; + node.location = expression.location; + node.variableType = this._parseExpression(-1, [ '=' ]); + if (this._tokenizer.eat('=')) { + node.initializer = this._parseExpression(); + } + return node; + } + let statement = false; + switch (expression.type) { + case '=': + case ':=': + case '==': + case '!=': + case '+=': + case '-=': + case '*=': + case '@=': + case '/=': + case '//=': + case '**=': + case '&=': + case '|=': + case '%=': + case '>>=': + case '<<=': + case '>>': + case '<<': + case '>=': + case '<=': + case '<': + case '>': + case '%': + case '^=': + case '...': + case 'call': + case 'assert': + case 'raise': + case 'string': + case 'list': + case 'var': + case '.': + case '[]': + case 'yield': + case '+': + case '-': + case '*': + case '**': + case '@': + case '/': + case '//': + case '~': + case '&': + case '^': + case '|': + case 'not': + case 'id': + case 'number': + case 'in': + case 'and': + case 'or': + case 'if': + case 'for': + case 'tuple': + case 'lambda': + case 'await': + statement = true; + break; + } + if (statement) { + return expression; + } + 
throw new python.Error("Unhandled expression" + this._tokenizer.location()); + } + + return null; + } + + _parseExpression(minPrecedence, terminal, tuple) { + minPrecedence = minPrecedence || -1; + const terminalSet = new Set(terminal); + const stack = []; + for (;;) { + let node = this._node(); + const token = this._tokenizer.peek(); + if (stack.length == 1 && terminalSet.has(token.value)) { + break; + } + const precedence = python.Parser._precedence[token.value]; + if (precedence) { + if (precedence >= minPrecedence) { + this._tokenizer.read(); + node.type = token.value; + if (token.type == 'id' && (token.value === 'in' || token.value === 'not')) { + if (token.value === 'in') { + node.type = 'in'; + } + else if (this._tokenizer.eat('id', 'in')) { + node.type = 'not in'; + } + else { + node.type = 'not'; + node.expression = this._parseExpression(precedence, terminal, tuple === false ? false : true); + stack.push(node); + continue; + } + } + else if (token.value == '~') { + node.type = '~'; + node.expression = this._parseExpression(precedence, terminal, tuple === false ? false : true); + stack.push(node); + continue; + } + else if (token.type == 'id' && token.value == 'is') { + if (this._tokenizer.eat('id', 'not')) { + node.type = 'is not'; + } + } + node.left = stack.pop(); + node.right = this._parseExpression(precedence, terminal, tuple === false ? false : true); + stack.push(node); + continue; + } + } + if (this._tokenizer.eat(':=')) { + node.type = ':='; + node.target = stack.pop(); + node.expression = this._parseExpression(-1, terminal, tuple === false ? false : true); + stack.push(node); + continue; + } + if (this._tokenizer.eat('=')) { + node.type = '='; + node.target = stack.pop(); + node.expression = this._parseExpression(-1, terminal, tuple === false ? 
false : true); + stack.push(node); + continue; + } + switch (token.type) { + case '-=': + case '**=': + case '*=': + case '//=': + case '/=': + case '&=': + case '%=': + case '^=': + case '+=': + case '<<=': + case '>>=': + case '|=': + case '@=': + node = this._node(token.type); + this._tokenizer.expect(token.type); + node.target = stack.pop(); + node.expression = this._parseExpression(-1, terminal, true); + stack.push(node); + continue; + } + node = this._eat('id', 'if'); + if (node) { + node.then = stack.pop(); + node.condition = this._parseExpression(); + this._tokenizer.expect('id', 'else'); + node.else = this._parseExpression(); + stack.push(node); + continue; + } + while (this._tokenizer.match('id', 'for') || this._tokenizer.match('id', 'async')) { + const async = this._eat('id', 'async'); + if (async && !this._tokenizer.match('id', 'for')) { + throw new python.Error("Expected 'for'" + this._tokenizer.location()); + } + node = this._eat('id', 'for'); + if (node) { + if (async) { + node.async = async; + } + node.expression = stack.pop(); + node.variable = this._parseExpression(-1, [ 'in' ], true); + this._tokenizer.expect('id', 'in'); + node.target = this._parseExpression(-1, [ 'for', 'if' ], true); + while (this._tokenizer.eat('id', 'if')) { + node.condition = node.condition || []; + node.condition.push(this._parseExpression(-1, [ 'for', 'if' ])); + } + stack.push(node); + } + } + node = this._eat('id', 'lambda'); + if (node) { + node.parameters = this._parseParameters(':'); + node.body = this._parseExpression(-1, terminal, false); + stack.push(node); + continue; + } + node = this._eat('id', 'yield'); + if (node) { + if (this._tokenizer.eat('id', 'from')) { + node.from = this._parseExpression(-1, [], true); + } + else { + node.expression = []; + do { + node.expression.push(this._parseExpression(-1, [], false)); + } + while (this._tokenizer.eat(',')); + } + stack.push(node); + continue; + } + node = this._eat('id', 'await'); + if (node) { + node.expression = 
this._parseExpression(minPrecedence, terminal, tuple); + stack.push(node); + continue; + } + node = this._eat('.'); + if (node) { + this._tokenizer.eat('\n'); + node.target = stack.pop(); + node.member = this._parseName(); + stack.push(node); + continue; + } + if (this._tokenizer.peek().value === '(') { + if (stack.length == 0) { + node = this._node('tuple'); + const args = this._parseArguments(); + if (args.length == 1) { + stack.push(args[0]); + } + else { + node.value = args; + stack.push(node); + } + } + else { + node = this._node('call'); + node.target = stack.pop(); + node.arguments = this._parseArguments(); + stack.push(node); + } + continue; + } + if (this._tokenizer.peek().value === '[') { + if (stack.length == 0) { + stack.push(this._parseExpressions()); + } + else { + node = this._node('[]'); + node.target = stack.pop(); + node.arguments = this._parseSlice(); + stack.push(node); + } + continue; + } + if (this._tokenizer.peek().value == '{') { + stack.push(this._parseDictOrSetMaker()); + continue; + } + node = this._node(); + const literal = this._parseLiteral(); + if (literal) { + if (stack.length > 0 && literal.type == 'number' && + (literal.value.startsWith('-') || literal.value.startsWith('+'))) { + node.type = literal.value.substring(0, 1); + literal.value = literal.value.substring(1); + node.left = stack.pop(); + node.right = literal; + stack.push(node); + } + else if (stack.length == 1 && literal.type == 'string' && stack[0].type == 'string') { + stack[0].value += literal.value; + } + else { + stack.push(literal); + } + continue; + } + if (this._tokenizer.peek().keyword) { + break; + } + node = this._eat('...'); + if (node) { + stack.push(node); + continue; + } + const identifier = this._parseName(); + if (identifier) { + stack.push(identifier); + continue; + } + + if (tuple === true && stack.length == 1 && this._tokenizer.eat(',')) { + if (stack[0].type === 'tuple') { + node = stack[0]; + } + else { + node = this._node('tuple'); + node.value = [ 
stack.pop() ]; + stack.push(node); + } + // for, bar, = + if (this._tokenizer.peek().value === '=') { + continue; + } + if (!this._tokenizer.match('=') && !terminalSet.has(this._tokenizer.peek().value)) { + const nextTerminal = terminal.slice(0).concat([ ',', '=' ]); + const expression = this._parseExpression(minPrecedence, nextTerminal, tuple); + if (expression) { + node.value.push(expression); + continue; + } + } + break; + } + break; + } + + if (stack.length == 1) { + return stack.pop(); + } + if (stack.length != 0) { + throw new python.Error('Unexpected expression' + this._tokenizer.location()); + } + return null; + } + + _parseDictOrSetMaker() { + const list = []; + this._tokenizer.expect('{'); + let dict = true; + while (!this._tokenizer.eat('}')) { + const item = this._parseExpression(-1, [], false); + if (item == null) { + throw new python.Error('Expected expression' + this._tokenizer.location()); + } + if (!this._tokenizer.eat(':')) { + dict = false; + } + if (dict) { + const value = this._parseExpression(-1, [], false); + if (value == null) { + throw new python.Error('Expected expression' + this._tokenizer.location()); + } + list.push({ type: 'pair', key: item, value: value }); + } + else { + list.push(item); + } + this._tokenizer.eat(','); + this._tokenizer.eat('\n'); + if (this._tokenizer.eat('}')) { + break; + } + } + if (dict) { + return { type: 'dict', value: list }; + } + return { type: 'set', value: list }; + } + + _parseExpressions() { + const list = []; + this._tokenizer.expect('['); + while (!this._tokenizer.eat(']')) { + const expression = this._parseExpression(); + if (expression == null) { + throw new python.Error('Expected expression' + this._tokenizer.location()); + } + list.push(expression); + this._tokenizer.eat(','); + while (this._tokenizer.eat('\n')) { + // continue + } + if (this._tokenizer.eat(']')) { + break; + } + } + return { type: 'list', value: list }; + } + + _parseSlice() { + let node = { type: '::' }; + let list = []; + const 
group = [ 'start', 'stop', 'step' ]; + this._tokenizer.expect('['); + while (!this._tokenizer.eat(']')) { + if (this._tokenizer.eat(':')) { + node[group.shift()] = { type: 'list', value: list }; + list = []; + continue; + } + if (this._tokenizer.eat(',')) { + // list.push({}); + continue; + } + if (this._tokenizer.peek().value != ']') { + const expression = this._parseExpression(); + if (expression == null) { + throw new python.Error('Expected expression' + this._tokenizer.location()); + } + list.push(expression); + } + } + if (list.length > 0) { + node[group.shift()] = { type: 'list', value: list }; + } + if (node.start && !node.stop && !node.step) { + node = node.start; + } + return node; + } + + _parseName() { + const token = this._tokenizer.peek(); + if (token.type == 'id' && !token.keyword) { + this._tokenizer.read(); + return token; + } + return null; + } + + _parseLiteral() { + const token = this._tokenizer.peek(); + if (token.type == 'string' || token.type == 'number' || token.type == 'boolean') { + this._tokenizer.read(); + return token; + } + return null; + } + + _parseTypeArguments() { + const list = []; + this._tokenizer.expect('['); + while (!this._tokenizer.eat(']')) { + const type = this._parseType(); + if (type == null) { + throw new python.Error('Expected type ' + this._tokenizer.location()); + } + list.push(type); + if (!this._tokenizer.eat(',')) { + this._tokenizer.expect(']'); + break; + } + } + return list; + } + + _parseType() { + const type = this._node(); + type.type = 'type'; + type.name = this._parseExpression(-1, [ '[', '=' ]); + if (type.name) { + if (this._tokenizer.peek().value === '[') { + type.arguments = this._parseTypeArguments(); + } + return type; + } + return null; + } + + _parseParameter(terminal) { + const node = this._node('parameter'); + if (this._tokenizer.eat('/')) { + node.name = '/'; + return node; + } + if (this._tokenizer.eat('**')) { + node.parameterType = '**'; + } + if (this._tokenizer.eat('*')) { + 
node.parameterType = '*'; + } + const identifier = this._parseName(); + if (identifier !== null) { + node.name = identifier.value; + if (terminal !== ':' && this._tokenizer.eat(':')) { + node.parameterType = this._parseType(); + } + if (this._tokenizer.eat('=')) { + node.initializer = this._parseExpression(); + } + return node; + } + return null; + } + + _parseParameters(terminal) { + const list = []; + while (!this._tokenizer.eat(terminal)) { + this._tokenizer.eat('\n'); + if (this._tokenizer.eat('(')) { + list.push(this._parseParameters(')')); + } + else { + list.push(this._parseParameter(terminal)); + } + this._tokenizer.eat('\n'); + if (!this._tokenizer.eat(',')) { + this._tokenizer.expect(terminal); + break; + } + } + return list; + } + + _parseArguments() { + const list = []; + this._tokenizer.expect('('); + while (!this._tokenizer.eat(')')) { + if (this._tokenizer.eat('\n')) { + continue; + } + const expression = this._parseExpression(-1, [], false); + if (expression == null) { + throw new python.Error('Expected expression ' + this._tokenizer.location()); + } + list.push(expression); + if (!this._tokenizer.eat(',')) { + this._tokenizer.eat('\n'); + this._tokenizer.expect(')'); + break; + } + } + return list; + } + + _node(type) { + const node = {}; + node.location = this._tokenizer.location(); + if (type) { + node.type = type; + } + return node; + } + + _eat(type, value) { + if (this._tokenizer.match(type, value)) { + const node = this._node(type === 'id' ? 
value : type); + this._tokenizer.expect(type, value); + return node; + } + return null; + } +}; + +python.Tokenizer = class { + + constructor(text, file) { + this._text = text; + this._file = file; + this._position = 0; + this._lineStart = 0; + this._line = 0; + this._token = { type: '', value: '' }; + this._brackets = 0; + this._indentation = []; + this._outdent = 0; + if (!python.Tokenizer._whitespace) { + python.Tokenizer._whitespace = new RegExp('[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]'); + const identifierStartChars = '\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0
e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\u
a7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc'; + let identifierChars = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u
1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f'; + python.Tokenizer._identifierStart = new RegExp('[' + identifierStartChars + ']'); + python.Tokenizer._identifierChar = new RegExp('[' + identifierStartChars + identifierChars + ']'); + } + } + + peek() { + if (!this._cache) { + this._token = this._tokenize(this._token); + this._cache = true; + } + return this._token; + } + + read() { + if (!this._cache) { + this._token = this._tokenize(this._token); + } + const next = this._position + this._token.value.length; + while (this._position < next) { + if (python.Tokenizer._isNewline(this._get(this._position))) { + this._position = this._newLine(this._position); + this._lineStart = this._position; + this._line++; + } + else { + this._position++; + } + } + this._cache = false; + return this._token; + } + + match(type, value) { + const token = this.peek(); + if (token.type === type && (!value || token.value === value)) { + return true; + } + return false; + } + + eat(type, value) { + const token = this.peek(); + if (token.type === type && (!value || token.value === value)) { + 
this.read(); + return true; + } + return false; + } + + expect(type, value) { + const token = this.peek(); + if (token.type !== type) { + throw new python.Error("Unexpected '" + token.value + "' instead of '" + type + "'" + this.location()); + } + if (value && token.value !== value) { + throw new python.Error("Unexpected '" + token.value + "' instead of '" + value + "'" + this.location()); + } + this.read(); + } + + location() { + return ' at ' + this._file + ':' + (this._line + 1).toString() + ':' + (this._position - this._lineStart + 1).toString(); + } + + static _isSpace(c) { + switch (c) { + case ' ': + case '\t': + case '\v': // 11 + case '\f': // 12 + case '\xA0': // 160 + return true; + default: + if (c.charCodeAt(0) >= 0x1680) { + return python.Tokenizer._whitespace.test(c); + } + return false; + } + } + + static _isNewline(c) { + switch(c) { + case '\n': + case '\r': + case '\u2028': // 8232 + case '\u2029': // 8233 + return true; + } + return false; + } + + static _isIdentifierStartChar(c) { + if (c < 'A') { + return c === '$'; + } + if (c <= 'Z') { + return true; + } + if (c < 'a') { + return c === '_'; + } + if (c <= 'z') { + return true; + } + const code = c.charCodeAt(0); + if (code >= 0xAA) { + return python.Tokenizer._identifierStart.test(c); + } + return false; + } + + static _isIdentifierChar(c) { + if (c < '0') { + return c === '$'; + } + if (c <= '9') { + return true; + } + if (c < 'A') { + return false; + } + if (c <= 'Z') { + return true; + } + if (c < 'a') { + return c === '_'; + } + if (c <= 'z') { + return true; + } + const code = c.charCodeAt(0); + if (code >= 0xAA) { + return python.Tokenizer._identifierChar.test(c); + } + return false; + } + + static _isDecimal(c) { + return c >= '0' && c <= '9' || c === '_'; + } + + static _isHex(c) { + return python.Tokenizer._isDecimal(c) || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') || c === '_'; + } + + static _isOctal(c) { + return c >= '0' && c <= '7' || c === '_'; + } + + static 
_isBinary(c) { + return c === '0' || c === '1' || c === '_'; + } + + _get(position) { + return position >= this._text.length ? '\0' : this._text[position]; + } + + _skipLine() { + while (this._position < this._text.length) { + if (python.Tokenizer._isNewline(this._get(this._position))) { + break; + } + this._position++; + } + } + + _skipWhitespace() { + while (this._position < this._text.length) { + const c = this._text[this._position]; + if (c == '#') { + this._skipLine(); + } + else if (python.Tokenizer._isSpace(c)) { + this._position++; + } + else if (c == '\\') { + // Explicit Line Continuation + this._position++; + if (python.Tokenizer._isNewline(this._get(this._position))) { + this._position = this._newLine(this._position); + this._lineStart = this._position; + this._line++; + } + else { + throw new python.Error("Unexpected '" + this._text[this._position] + "' after line continuation" + this.location()); + } + } + else if (this._brackets > 0 && python.Tokenizer._isNewline(c)) { + // Implicit Line Continuation + this._position = this._newLine(this._position); + this._lineStart = this._position; + this._line++; + } + else { + break; + } + } + } + + _newLine(position) { + if ((this._get(position) === '\n' && this._get(position + 1) === '\r') || + (this._get(position) === '\r' && this._get(position + 1) === '\n')) { + return position + 2; + } + return position + 1; + } + + _tokenize(token) { + if (this._token.type !== '\n') { + this._skipWhitespace(); + } + if (this._token.type === 'dedent') { + this._indentation.pop(); + this._outdent--; + if (this._outdent > 0) { + return { type: 'dedent', value: '' }; + } + } + if (token.type == '\n') { + let indent = ''; + let i = this._position; + while (i < this._text.length) { + const c = this._text[i]; + if (python.Tokenizer._isSpace(c)) { + indent += c; + i++; + } + else if (python.Tokenizer._isNewline(c)) { + indent = ''; + i = this._newLine(i); + this._position = i; + this._lineStart = i; + this._line++; + } + else if 
(c == '#') { + indent = ''; + while (i < this._text.length && !python.Tokenizer._isNewline(this._text[i])) { + i++; + } + continue; + } + else { + break; + } + } + let type = null; + if (indent.length > 0) { + const current = this._indentation.length > 0 ? this._indentation[this._indentation.length - 1] : ''; + if (indent.length > current.length) { + type = 'indent'; + this._indentation.push(indent); + } + else if (indent.length > 0 && indent.length < current.length) { + type = 'dedent'; + this._outdent = 0; + for (let j = this._indentation.length - 1; j >= 0 && indent.length < this._indentation[j].length; j--) { + this._outdent++; + } + } + else { + this._position += indent.length; + } + } + else if (i >= this._text.length) { + return { type: 'eof', value: '' }; + } + else if (this._indentation.length > 0) { + type = 'dedent'; + this._outdent = this._indentation.length; + } + + switch (type) { + case 'indent': + case 'dedent': + return { type: type, value: indent }; + } + } + if (this._position >= this._text.length) { + return { type: 'eof', value: '' }; + } + const c = this._get(this._position); + const string = this._string(); + if (string) { + return string; + } + switch (c) { + case '(': + case '[': + case '{': + this._brackets++; + return { type: c, value: c }; + case ')': + case ']': + case '}': + if (this._brackets === 0) { + throw new python.Error("Unexpected '" + c + "'" + this.location); + } + this._brackets--; + return { type: c, value: c }; + case ',': + case ';': + case '?': + return { type: c, value: c }; + default: { + const number = this._number(); + if (number) { + return number; + } + if (c === '.') { + let end = this._position + 1; + while (this._get(end) === '.') { + end++; + } + const text = this._text.substring(this._position, end); + return { type: text, value: text }; + } + const identifier = this._identifier(); + if (identifier) { + return identifier; + } + const operator = this._operator(); + if (operator) { + return operator; + } + 
break; + } + } + if (c === '.') { + return { type: c, value: c }; + } + if (c === '\\') { + return { type: '\\', value: c }; + } + if (python.Tokenizer._isNewline(c)) { + return { type: '\n', value: this._text.substring(this._position, this._newLine(this._position)) }; + } + throw new python.Error("Unexpected token '" + c + "'" + this.location()); + } + + _number() { + let c = this._get(this._position); + const sign = (c === '-' || c === '+') ? 1 : 0; + let i = this._position + sign; + c = this._get(i); + if (c === '0') { + let radix = 0; + let n = this._get(i + 1); + if ((n === 'x' || n === 'X') && python.Tokenizer._isHex(this._get(i + 2))) { + i += 2; + while (python.Tokenizer._isHex(this._get(i))) { + i += 1; + } + if (this._get(i) === 'l' || this._get(i) === 'L') { + i += 1; + } + radix = 16; + } + else if ((n === 'b' || n === 'B') && python.Tokenizer._isBinary(this._get(i + 2))) { + i += 2; + while (python.Tokenizer._isBinary(this._get(i))) { + i++; + } + radix = 2; + } + else if ((n === 'o' || n === 'O') && python.Tokenizer._isOctal(this._get(i + 2))) { + i += 2; + while (python.Tokenizer._isOctal(this._get(i))) { + i++; + } + radix = 8; + } + else if (n >= '0' && n <= '7') { + i++; + while (python.Tokenizer._isOctal(this._get(i))) { + i += 1; + } + if (this._get(i) === 'l' || this._get(i) === 'L') { + i += 1; + } + radix = 8; + } + if (radix > 0 && this._get(i) !== '.') { + const radixText = this._text.substring(this._position, i); + const radixParseText = radixText.indexOf('_') !== -1 ? radixText.split('_').join('') : radixText; + if (!isNaN(parseInt(radixParseText, radix))) { + return { type: 'number', value: radixText }; + } + } + } + i = this._position + sign; + let decimal = false; + if (this._get(i) >= '1' && this._get(i) <= '9') { + while (python.Tokenizer._isDecimal(this._get(i))) { + i++; + } + c = this._get(i).toLowerCase(); + decimal = c !== '.' 
&& c !== 'e'; + } + if (this._get(i) === '0') { + i++; + c = this._get(i).toLowerCase(); + decimal = !python.Tokenizer._isDecimal(c) && c !== '.' && c !== 'e' && c !== 'j'; + } + if (decimal) { + if (this._get(i) === 'j' || this._get(i) === 'J' || this._get(i) === 'l' || this._get(i) === 'L') { + return { 'type': 'number', value: this._text.substring(this._position, i + 1) }; + } + const intText = this._text.substring(this._position, i); + if (!isNaN(parseInt(intText, 10))) { + return { type: 'number', value: intText }; + } + } + i = this._position + sign; + if ((this._get(i) >= '0' && this._get(i) <= '9') || + (this._get(i) === '.' && this._get(i + 1) >= '0' && this._get(i + 1) <= '9')) { + while (python.Tokenizer._isDecimal(this._get(i))) { + i++; + } + if (this._get(i) === '.') { + i++; + } + while (python.Tokenizer._isDecimal(this._get(i))) { + i++; + } + if (i > (this._position + sign)) { + if (this._get(i) === 'e' || this._get(i) === 'E') { + i++; + if (this._get(i) == '-' || this._get(i) == '+') { + i++; + } + if (!python.Tokenizer._isDecimal(this._get(i))) { + i = this._position; + } + else { + while (python.Tokenizer._isDecimal(this._get(i))) { + i++; + } + } + } + else { + while (python.Tokenizer._isDecimal(this._get(i))) { + i++; + } + } + } + if (i > (this._position + sign)) { + if (this._get(i) === 'j' || this._get(i) === 'J') { + return { type: 'number', value: this._text.substring(this._position, i + 1) }; + } + const floatText = this._text.substring(this._position, i); + const floatParseText = floatText.indexOf('_') != -1 ? 
floatText.split('_').join('') : floatText; + if (!isNaN(parseFloat(floatParseText))) { + return { type: 'number', value: floatText }; + } + } + } + return null; + } + + _identifier() { + let i = this._position; + if (python.Tokenizer._isIdentifierStartChar(this._get(i))) { + i++; + while (python.Tokenizer._isIdentifierChar(this._get(i))) { + i++; + } + } + if (i > this._position) { + const text = this._text.substring(this._position, i); + return { type: 'id', value: text, keyword: python.Tokenizer._isKeyword(text) }; + } + return null; + } + + _operator() { + let length = 0; + const c0 = this._get(this._position); + const c1 = this._get(this._position + 1); + const c2 = this._get(this._position + 2); + switch (c0) { + case '+': + case '&': + case '|': + case '^': + case '=': + case '!': + case '%': + case '~': + length = c1 === '=' ? 2 : 1; + break; + case '-': + length = c1 === '=' || c1 === '>' ? 2 : 1; + break; + case '*': + if (c1 === '*') { + length = c2 === '=' ? 3 : 2; + } + else { + length = c1 === '=' ? 2 : 1; + } + break; + case '/': + if (c1 === '/') { + length = c2 === '=' ? 3 : 2; + } + else { + length = c1 === '=' ? 2 : 1; + } + break; + case '<': + if (c1 === '>') { + length = 2; + } + else if (c1 === '<') { + length = c2 === '=' ? 3 : 2; + } + else { + length = c1 === '=' ? 2 : 1; + } + break; + case '>': + if (c1 === '>') { + length = c2 === '=' ? 3 : 2; + } + else { + length = c1 === '=' ? 2 : 1; + } + break; + case '@': + length = c1 === '=' ? 2 : 1; + break; + case ':': + length = c1 === '=' ? 
2 : 1; + } + if (length > 0) { + const text = this._text.substring(this._position, this._position + length); + return { type: text, value: text }; + } + return null; + } + + _string() { + let i = this._position; + let prefix = -1; + if (this._get(i) === "'" || this._get(i) === '"') { + prefix = ''; + } + else if (this._get(i + 1) === "'" || this._get(i + 1) === '"') { + const c = this._get(i); + switch (c.toLowerCase()) { + case 'b': + case 'f': + case 'r': + case 'u': + prefix = c; + break; + } + } + else if (this._get(i + 2) === "'" || this._get(i + 2) === '"') { + const cc = this._text.substr(this._position, 2); + switch (cc.toLowerCase()) { + case 'br': + case 'fr': + case 'rb': + case 'rf': + case 'ur': + prefix = cc; + break; + } + } + if (prefix.length >= 0) { + i += prefix.length; + let quote = ''; + let count = 0; + const q0 = this._get(i); + const q1 = this._get(i + 1); + const q2 = this._get(i + 2); + switch (q0) { + case "'": + quote = q0; + count = (q1 === "'" && q2 === "'") ? 3 : 1; + break; + case '"': + quote = q0; + count = (q1 === '"' && q2 === '"') ? 
3 : 1; + } + i += count; + if (count == 1) { + while (i < this._text.length) { + if (this._text[i] === quote) { + return { type: 'string', value: this._text.substring(this._position, i + 1) }; + } + else if (this._text[i] === '\\' && + (this._get(i + 1) == quote || this._get(i + 1) == '\n' || this._get(i + 1) == '\\')) { + i += 2; + } + else if (this._text[i] === '\r' || this._text[i] === '\n') { + break; + } + else { + i++; + } + } + } + else if (count == 3) { + while (i < this._text.length) { + if (this._get(i) === quote && this._get(i + 1) === quote && this._get(i + 2) === quote) { + return { type: 'string', value: this._text.substring(this._position, i + 3) }; + } + else if (this._get(i) === '\\' && this._get(i + 1) === quote) { + i += 2; + continue; + } + i++; + } + } + } + i = this._position; + if (this._get(i) === '`') { + i++; + while (i < this._text.length) { + if (this._text[i] === '`') { + return { type: 'string', value: this._text.substring(this._position, i + 1) }; + } + i++; + } + } + return null; + } + + static _isKeyword(value) { + switch (value) { + case 'and': + case 'as': + case 'else': + case 'for': + case 'if': + case 'import': + case 'in': + case 'is': + case 'not': + case 'or': + return true; + } + return false; + } +}; + +python.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading Python module.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Parser = python.Parser; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/pytorch-metadata.json b/frontend/packages/core/public/netron/pytorch-metadata.json new file mode 100755 index 00000000..d3a00b3e --- /dev/null +++ b/frontend/packages/core/public/netron/pytorch-metadata.json @@ -0,0 +1,4574 @@ +[ + { + "name": "torch.nn.modules.conv.Conv1d", + "schema": { + "attributes": [ + { + "name": "output_padding", + "visible": false + }, + { + "name": "in_channels", + 
"visible": false + }, + { + "name": "out_channels", + "visible": false + }, + { + "default": 1, + "name": "groups" + }, + { + "default": false, + "name": "transposed" + }, + { + "default": [ + 0 + ], + "name": "padding" + }, + { + "default": [ + 1 + ], + "name": "dilation" + }, + { + "default": [ + 1 + ], + "name": "stride" + } + ], + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.conv.ConvTranspose1d", + "schema": { + "attributes": [ + { + "name": "output_padding", + "visible": false + }, + { + "name": "in_channels", + "visible": false + }, + { + "name": "out_channels", + "visible": false + }, + { + "default": 1, + "name": "groups" + }, + { + "default": true, + "name": "transposed" + }, + { + "default": [ + 0 + ], + "name": "padding" + }, + { + "default": [ + 1 + ], + "name": "dilation" + }, + { + "default": [ + 1 + ], + "name": "stride" + } + ], + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.conv.Conv2d", + "schema": { + "attributes": [ + { + "name": "output_padding", + "visible": false + }, + { + "name": "in_channels", + "visible": false + }, + { + "name": "out_channels", + "visible": false + }, + { + "default": 1, + "name": "groups" + }, + { + "default": false, + "name": "transposed" + }, + { + "default": [ + 0, + 0 + ], + "name": "padding" + }, + { + "default": [ + 1, + 1 + ], + "name": "dilation" + }, + { + "default": [ + 1, + 1 + ], + "name": "stride" + } + ], + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.conv.ConvTranspose2d", + "schema": { + "attributes": [ + { + "name": "output_padding", + "visible": false + }, + { + "name": "in_channels", + "visible": false + }, + { + "name": "out_channels", + "visible": false + }, + { + "default": 1, + "name": "groups" + }, + { + "default": true, + "name": "transposed" + }, + { + "default": [ + 0, + 0 + ], + "name": "padding" + }, + { + "default": [ + 1, + 1 + ], + "name": "dilation" + }, + { + "default": [ + 1, + 1 + ], + "name": "stride" + } + ], + "category": "Layer" + } 
+ }, + { + "name": "torch.nn.modules.conv.Conv3d", + "schema": { + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.conv.ConvTranspose3d", + "schema": { + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.linear.Linear", + "schema": { + "attributes": [], + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.activation.Sigmoid", + "schema": { + "category": "Activation" + } + }, + { + "name": "Softmax", + "schema": { + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.Softmax2d", + "schema": { + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.Softplus", + "schema": { + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.Tanh", + "schema": { + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.LogSoftmax", + "schema": { + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.ReLU", + "schema": { + "attributes": [ + { + "default": false, + "name": "inplace", + "visible": false + }, + { + "default": 0, + "name": "threshold" + }, + { + "default": 0, + "name": "value" + } + ], + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.ReLU6", + "schema": { + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.PReLU", + "schema": { + "attributes": [], + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.ELU", + "schema": { + "attributes": [], + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.GLU", + "schema": { + "attributes": [], + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.Hardtanh", + "schema": { + "attributes": [], + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.activation.LeakyReLU", + "schema": { + "attributes": [], + "category": "Activation" + } + }, + { + "name": "torch.nn.modules.pooling.MaxPool1d", + "schema": { + "category": "Pool" + } + }, + { + 
"name": "torch.nn.modules.pooling.MaxPool2d", + "schema": { + "attributes": [ + { + "default": 0, + "name": "padding" + }, + { + "default": 1, + "name": "dilation" + }, + { + "default": false, + "name": "return_indices" + }, + { + "name": "ceil_mode", + "visible": false + } + ], + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.MaxPool3d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.MaxUnpool1d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.MaxUnpool2d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.MaxUnpool3d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AvgPool2d", + "schema": { + "attributes": [ + { + "default": 0, + "name": "padding" + }, + { + "default": true, + "name": "count_include_pad" + }, + { + "name": "ceil_mode", + "visible": false + } + ], + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AvgPool3d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AdaptiveAvgPool1d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AdaptiveAvgPool2d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AdaptiveAvgPool3d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AdaptiveMaxPool1d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AdaptiveMaxPool2d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.pooling.AdaptiveMaxPool3d", + "schema": { + "category": "Pool" + } + }, + { + "name": "torch.nn.modules.batchnorm.BatchNorm1d", + "schema": { + "attributes": [], + "category": "Normalization" + } + }, + { + "name": "torch.nn.modules.batchnorm.BatchNorm2d", + "schema": { + "attributes": [ + { + "default": 1e-05, + "name": "eps" + }, + { + "default": 0.1, + "name": "momentum" + 
}, + { + "default": true, + "name": "affine" + }, + { + "default": true, + "name": "track_running_stats" + } + ], + "category": "Normalization", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + }, + { + "name": "bias" + }, + { + "name": "running_mean" + }, + { + "name": "running_var" + }, + { + "name": "num_batches_tracked", + "visible": false + } + ] + } + }, + { + "name": "torch.nn.modules.normalization.GroupNorm", + "schema": { + "category": "Normalization" + } + }, + { + "name": "torch.nn.modules.normalization.LayerNorm", + "schema": { + "category": "Normalization" + } + }, + { + "name": "torch.nn.modules.dropout.Dropout2d", + "schema": { + "attributes": [ + { + "default": false, + "name": "inplace", + "visible": false + }, + { + "default": 0.5, + "name": "p" + } + ], + "category": "Dropout" + } + }, + { + "name": "torch.nn.modules.dropout.Dropout", + "schema": { + "attributes": [ + { + "default": false, + "name": "inplace", + "visible": false + }, + { + "default": 0.5, + "name": "p" + } + ], + "category": "Dropout" + } + }, + { + "name": "torch.nn.modules.rnn.GRU", + "schema": { + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.rnn.GRUCell", + "schema": { + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.rnn.LSTM", + "schema": { + "attributes": [ + { + "default": 0, + "name": "dropout" + }, + { + "default": {}, + "name": "dropout_state" + }, + { + "default": 1, + "name": "num_layers" + }, + { + "name": "batch_first", + "visible": false + }, + { + "name": "bidirectional", + "visible": false + }, + { + "name": "bias", + "visible": false + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight_ih_l0", + "visible": false + }, + { + "name": "weight_hh_l0", + "visible": false + }, + { + "name": "bias_ih_l0", + "visible": false + }, + { + "name": "bias_hh_l0", + "visible": false + }, + { + "name": "weight_ih_l1", + "visible": false + }, + { + "name": "weight_hh_l1", + "visible": false + 
}, + { + "name": "bias_ih_l1", + "visible": false + }, + { + "name": "bias_hh_l1", + "visible": false + } + ] + } + }, + { + "name": "torch.nn.modules.rnn.LSTMCell", + "schema": { + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.rnn.RNN", + "schema": { + "category": "Layer" + } + }, + { + "name": "torch.nn.modules.sparse.Embedding", + "schema": { + "attributes": [ + { + "default": 2, + "name": "norm_type" + }, + { + "default": false, + "name": "scale_grad_by_freq" + }, + { + "default": false, + "name": "sparse" + }, + { + "default": null, + "name": "max_norm" + }, + { + "default": null, + "name": "padding_idx" + } + ], + "category": "Transform" + } + }, + { + "name": "torch.nn.modules.padding.ReflectionPad1d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ReflectionPad2d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ReplicationPad1d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ReplicationPad2d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ReplicationPad3d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ZeroPad2d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ConstantPad1d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ConstantPad2d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.padding.ConstantPad3d", + "schema": { + "category": "Tensor" + } + }, + { + "name": "torch.nn.modules.pixelshuffle.PixelShuffle", + "schema": {} + }, + { + "name": "torch.nn.modules.instancenorm.InstanceNorm1d", + "schema": {} + }, + { + "name": "torch.nn.modules.instancenorm.InstanceNorm2d", + "schema": {} + }, + { + "name": "torch.nn.modules.instancenorm.InstanceNorm3d", + "schema": {} + }, + { + "name": "torch.nn.modules.normalization.CrossMapLRN2d", + 
"schema": { + "attributes": [ + { + "default": 0.0001, + "name": "alpha" + }, + { + "default": 0.75, + "name": "beta" + }, + { + "default": 1, + "name": "k" + } + ], + "category": "Normalization" + } + }, + { + "name": "torch._convolution", + "schema": { + "attributes": [ + { + "default": 1, + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": false, + "name": "transposed", + "type": "boolean" + }, + { + "default": 0, + "name": "output_padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "groups", + "type": "int64" + }, + { + "name": "benchmark", + "type": "boolean", + "visible": false + }, + { + "name": "deterministic", + "type": "boolean", + "visible": false + }, + { + "name": "cudnn_enabled", + "type": "boolean", + "visible": false + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + }, + { + "name": "bias" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.conv2d", + "schema": { + "attributes": [ + { + "default": 1, + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": 1, + "name": "groups", + "type": "int64" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + }, + { + "name": "bias", + "option": "optional" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.conv3d", + "schema": { + "attributes": [ + { + "default": 1, + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": 1, + "name": "groups", + "type": "int64" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" 
+ }, + { + "name": "weight" + }, + { + "name": "bias", + "option": "optional" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "ops.quantized.add", + "schema": { + "attributes": [ + { + "name": "scale" + }, + { + "name": "zero_point" + } + ], + "inputs": [ + { + "name": "A" + }, + { + "name": "B" + } + ], + "outputs": [ + { + "name": "C" + } + ] + } + }, + { + "name": "ops.quantized.linear", + "schema": { + "attributes": [ + { + "name": "scale" + }, + { + "name": "zero_point" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "packed_params" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.dequantize", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "ops.quantized.conv2d", + "schema": { + "attributes": [ + { + "default": 1, + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": 1, + "name": "groups", + "type": "int64" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "packed_params" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "ops.quantized.conv2d_relu", + "schema": { + "attributes": [ + { + "default": 1, + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": 1, + "name": "groups", + "type": "int64" + }, + { + "name": "output_scale", + "type": "float64" + }, + { + "name": "output_zero_point", + "type": "int64" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "packed_weight" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.max_pool1d", + "schema": { + "attributes": [ + { 
+ "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.max_pool2d", + "schema": { + "attributes": [ + { + "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.max_pool3d", + "schema": { + "attributes": [ + { + "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.max_pool1d_with_indices", + "schema": { + "attributes": [ + { + "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + }, + { + "default": false, + "name": "return_indices", + "type": "boolean" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": 
"output" + }, + { + "name": "indices" + } + ] + } + }, + { + "name": "torch.max_pool2d_with_indices", + "schema": { + "attributes": [ + { + "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": 1, + "name": "dilation", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + }, + { + "default": false, + "name": "return_indices", + "type": "boolean" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "indices" + } + ] + } + }, + { + "name": "torch.adaptive_avg_pool1d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]", + "visible": false + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.adaptive_avg_pool2d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]", + "visible": false + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.adaptive_avg_pool3d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]", + "visible": false + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.adaptive_max_pool1d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]", + "visible": false + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" 
+ } + ] + } + }, + { + "name": "torch.adaptive_max_pool2d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]", + "visible": false + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + } + ] + } + }, + { + "name": "torch.adaptive_max_pool3d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]", + "visible": false + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + } + ] + } + }, + { + "name": "torch.avg_pool1d", + "schema": { + "attributes": [ + { + "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "default": 0, + "name": "padding", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + }, + { + "default": true, + "name": "count_include_pad", + "type": "boolean" + }, + { + "name": "divisor_override" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.avg_pool2d", + "schema": { + "attributes": [ + { + "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "name": "padding", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + }, + { + "default": true, + "name": "count_include_pad", + "type": "boolean" + }, + { + "name": "divisor_override" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.avg_pool3d", + "schema": { + "attributes": [ + { + "name": "kernel_size", + "type": "int64[]" + }, + { + "name": "stride", + "type": "int64[]" + }, + { + "name": "padding", + "type": "int64[]" + }, + { + "default": false, + "name": "ceil_mode", + "type": "boolean" + }, + { + 
"default": true, + "name": "count_include_pad", + "type": "boolean" + }, + { + "name": "divisor_override", + "type": "int64" + } + ], + "category": "Pool", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.batch_norm", + "schema": { + "attributes": [ + { + "name": "training", + "type": "boolean", + "visible": false + }, + { + "default": 0.1, + "name": "momentum", + "type": "float32" + }, + { + "default": 1e-05, + "name": "eps", + "type": "float32" + }, + { + "name": "cudnn_enabled", + "type": "boolean", + "visible": false + } + ], + "category": "Normalization", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + }, + { + "name": "bias" + }, + { + "name": "running_mean" + }, + { + "name": "running_var" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.relu_", + "schema": { + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.relu", + "schema": { + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.prelu", + "schema": { + "category": "Activation", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.hardtanh", + "schema": { + "attributes": [ + { + "name": "min_value", + "type": "float64" + }, + { + "name": "max_value", + "type": "float64" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.hardtanh_", + "schema": { + "attributes": [ + { + "name": "min_value", + "type": "float64" + }, + { + "name": "max_value", + "type": "float64" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + 
}, + { + "name": "torch.sigmoid", + "schema": { + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sigmoid_", + "schema": { + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.threshold", + "schema": { + "attributes": [ + { + "default": 0, + "name": "threshold", + "type": "float64" + }, + { + "default": 0, + "name": "value", + "type": "float64" + }, + { + "default": false, + "name": "inplace", + "type": "boolean" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.threshold_", + "schema": { + "attributes": [ + { + "name": "threshold", + "type": "float64" + }, + { + "name": "value", + "type": "float64" + }, + { + "name": "inplace", + "type": "boolean" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.softmax", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.log_softmax", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + }, + { + "name": "dtype", + "visible": false + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.tanh", + "schema": { + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.leaky_relu", + "schema": { + "attributes": [ + { + "default": 0.01, + "name": "negative_slope" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + 
"outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.slice", + "schema": { + "attributes": [ + { + "default": 0, + "name": "dim", + "type": "int64" + }, + { + "default": 0, + "name": "start", + "type": "int64" + }, + { + "default": 9223372036854775807, + "name": "end", + "type": "int64" + }, + { + "default": 1, + "name": "step", + "type": "int64" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.dropout", + "schema": { + "attributes": [ + { + "default": 0.5, + "name": "p", + "type": "float64" + }, + { + "name": "train", + "type": "boolean", + "visible": false + } + ], + "category": "Dropout", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.feature_dropout", + "schema": { + "attributes": [ + { + "name": "p", + "type": "float64" + }, + { + "name": "train", + "type": "boolean" + } + ], + "category": "Dropout", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.dropout_", + "schema": { + "attributes": [ + { + "default": 0.5, + "name": "p", + "type": "float64" + }, + { + "name": "train", + "type": "boolean", + "visible": false + } + ], + "category": "Dropout", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.addmm", + "schema": { + "attributes": [ + { + "default": 1, + "name": "alpha", + "type": "float64" + }, + { + "default": 1, + "name": "beta", + "type": "float64" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "mat" + }, + { + "name": "mat1" + }, + { + "name": "mat2" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.add_:Tensor", + "schema": { + "attributes": [ + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": 
"other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.add_:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + }, + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sub_:Tensor", + "schema": { + "attributes": [ + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sub_:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + }, + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.pow", + "schema": { + "attributes": [ + { + "name": "exponent", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.add:Tensor", + "schema": { + "attributes": [ + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.add:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + }, + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sub:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + }, + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.rsub:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + }, + { + "default": 1, + "name": "alpha", + "type": 
"float64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sub:Tensor", + "schema": { + "attributes": [ + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.rsub:Tensor", + "schema": { + "attributes": [ + { + "default": 1, + "name": "alpha", + "type": "float64" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.div:Tensor", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.div:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.floordiv", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sum", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64[]" + }, + { + "name": "keepdim", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.matmul", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.transpose", + "schema": { + "attributes": [ + { + "name": "dim0", + "type": "int64" + }, + { + "name": "dim1", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.chunk", + "schema": { + "attributes": [ + { + "name": "chunks", + "type": "int64" + }, + { + 
"default": 0, + "name": "dim", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "output2" + } + ] + } + }, + { + "name": "torch.view", + "schema": { + "attributes": [ + { + "name": "size", + "type": "int64[]", + "visible": false + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.to", + "schema": { + "attributes": [ + { + "name": "dtype", + "visible": false + }, + { + "default": false, + "name": "non_blocking", + "type": "boolean" + }, + { + "default": false, + "name": "copy", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.clone", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.expand", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.copy_", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "src" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.reshape", + "schema": { + "attributes": [ + { + "name": "shape", + "type": "int64[]" + } + ], + "category": "Shape", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.select", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + }, + { + "name": "index", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.repeat", + "schema": { + "attributes": [ + { + "name": "repeats", 
+ "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.mean", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64[]" + }, + { + "name": "keepdim", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.lstm:input", + "schema": { + "attributes": [ + { + "name": "has_biases", + "type": "boolean" + }, + { + "name": "num_layers", + "type": "int64" + }, + { + "name": "dropout", + "type": "float64" + }, + { + "name": "train", + "type": "boolean" + }, + { + "name": "bidirectional", + "type": "boolean" + }, + { + "name": "batch_first", + "type": "boolean" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input", + "type": "T" + }, + { + "name": "hx", + "type": "T[]" + }, + { + "name": "params", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + }, + { + "name": "?" + } + ] + } + }, + { + "name": "torch.lstm:data", + "schema": { + "attributes": [ + { + "name": "has_biases", + "type": "boolean" + }, + { + "name": "num_layers", + "type": "int64" + }, + { + "name": "dropout", + "type": "float64" + }, + { + "name": "train", + "type": "boolean" + }, + { + "name": "bidirectional", + "type": "boolean" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "data", + "type": "T" + }, + { + "name": "batch_sizes", + "type": "T" + }, + { + "name": "hx", + "type": "T[]" + }, + { + "name": "params", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + }, + { + "name": "?" 
+ } + ] + } + }, + { + "name": "torch.gru:input", + "schema": { + "attributes": [ + { + "name": "has_biases", + "type": "boolean" + }, + { + "name": "num_layers", + "type": "int64" + }, + { + "name": "dropout", + "type": "float64" + }, + { + "name": "train", + "type": "boolean" + }, + { + "name": "bidirectional", + "type": "boolean" + }, + { + "name": "first_batch", + "type": "boolean" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input", + "type": "T" + }, + { + "name": "hx", + "type": "T" + }, + { + "name": "params", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + } + ] + } + }, + { + "name": "torch.gru:data", + "schema": { + "attributes": [ + { + "name": "has_biases", + "type": "boolean" + }, + { + "name": "num_layers", + "type": "int64" + }, + { + "name": "dropout", + "type": "float64" + }, + { + "name": "train", + "type": "boolean" + }, + { + "name": "bidirectional", + "type": "boolean" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "data", + "type": "T" + }, + { + "name": "batch_sizes", + "type": "T" + }, + { + "name": "hx", + "type": "T" + }, + { + "name": "params", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" 
+ } + ] + } + }, + { + "name": "torch.contiguous", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.squeeze", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + } + ], + "category": "Transform", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.unsqueeze", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + } + ], + "category": "Transform", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.max", + "schema": { + "attributes": [ + { + "name": "dim_or_y", + "type": "int64" + }, + { + "name": "dim", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + } + ] + } + }, + { + "name": "torch.min", + "schema": { + "attributes": [ + { + "name": "dim_or_y", + "type": "int64" + }, + { + "name": "dim", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" 
+ } + ] + } + }, + { + "name": "torch.cat", + "schema": { + "attributes": [ + { + "default": 0, + "name": "dim", + "type": "int64" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "inputs", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.stack", + "schema": { + "attributes": [ + { + "default": 0, + "name": "dim", + "type": "int64" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "inputs", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.expand_as", + "schema": { + "inputs": [ + { + "name": "inputs" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.permute", + "schema": { + "attributes": [ + { + "name": "dims", + "type": "int64[]" + } + ], + "category": "Shape", + "inputs": [ + { + "name": "inputs" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.embedding", + "schema": { + "attributes": [ + { + "name": "padding_idx", + "type": "int64" + }, + { + "default": false, + "name": "scale_grad_by_freq", + "type": "boolean" + }, + { + "default": false, + "name": "sparse", + "type": "boolean" + } + ], + "category": "Transform", + "inputs": [ + { + "name": "inputs" + }, + { + "name": "weight" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.flatten", + "schema": { + "attributes": [ + { + "name": "start_dim", + "type": "int64" + }, + { + "name": "end_dim", + "type": "int64" + } + ], + "category": "Shape", + "inputs": [ + { + "name": "inputs" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._pack_padded_sequence", + "schema": { + "attributes": [ + { + "name": "batch_first", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "lengths" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" 
+ } + ] + } + }, + { + "name": "torch._pad_packed_sequence", + "schema": { + "attributes": [ + { + "name": "batch_first", + "type": "boolean" + }, + { + "name": "padding_value", + "type": "float32" + }, + { + "name": "total_length", + "type": "int64" + } + ], + "inputs": [ + { + "name": "data" + }, + { + "name": "batch_sizes" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + } + ] + } + }, + { + "name": "torch.quantize_per_tensor", + "schema": { + "attributes": [ + { + "name": "scale", + "type": "float32" + }, + { + "name": "zero_point", + "type": "float32" + }, + { + "name": "dtype" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.floor_divide", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.index", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._cast_Byte", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._cast_Char", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._cast_Short", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._cast_Int", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { 
+ "name": "torch._cast_Long", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._cast_Half", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._cast_Float", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch._cast_Double", + "schema": { + "attributes": [ + { + "default": false, + "name": "non_blocking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.norm", + "schema": { + "attributes": [ + { + "name": "dim" + }, + { + "name": "p" + }, + { + "default": false, + "name": "keepdim", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.elu_", + "schema": { + "attributes": [ + { + "default": 1, + "name": "alpha" + }, + { + "default": 1, + "name": "scale" + }, + { + "default": 1, + "name": "input_scale" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.pixel_shuffle", + "schema": { + "attributes": [ + { + "name": "upscale_factor", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.nonzero", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.neg", + "schema": 
{ + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.rsqrt", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.grid_sampler", + "schema": { + "attributes": [ + { + "name": "interpolation_mode", + "type": "int64" + }, + { + "name": "padding_mode", + "type": "int64" + }, + { + "name": "align_corners", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + }, + { + "name": "grid" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sort", + "schema": { + "attributes": [ + { + "default": -1, + "name": "dim", + "type": "int64" + }, + { + "default": false, + "name": "descending", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "values" + }, + { + "name": "indices" + } + ] + } + }, + { + "name": "torch.gt:Tensor", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.gt:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.mul:Tensor", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.mul:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.mul_:Tensor", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.mul_:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + } + ], + "inputs": [ + { + "name": "input" + } + ], 
+ "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.div_:Tensor", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "other" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.div_:Scalar", + "schema": { + "attributes": [ + { + "name": "other" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.bmm", + "schema": { + "inputs": [ + { + "name": "input" + }, + { + "name": "mat2" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.constant_pad_nd", + "schema": { + "attributes": [ + { + "name": "pad", + "type": "int64[]" + }, + { + "default": 0, + "name": "value" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.ones", + "schema": { + "attributes": [ + { + "name": "size", + "type": "int64[]" + }, + { + "name": "dtype" + }, + { + "name": "layout" + }, + { + "name": "device" + }, + { + "name": "pin_memory" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.zeros", + "schema": { + "attributes": [ + { + "name": "size", + "type": "int64[]" + }, + { + "name": "dtype", + "option": "optional" + }, + { + "name": "layout", + "option": "optional" + }, + { + "name": "device", + "option": "optional" + }, + { + "name": "pin_memory", + "option": "optional" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.zeros_like", + "schema": { + "attributes": [ + { + "name": "dtype" + }, + { + "name": "layout" + }, + { + "name": "device" + }, + { + "name": "pin_memory", + "type": "boolean" + }, + { + "name": "memory_format" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.clamp", + "schema": { + "attributes": [ + { + "name": "min" + }, + { + "name": "max" + } + ], + "inputs": [ + { + "name": "input" + } + ], 
+ "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.floor", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.abs_", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.leaky_relu_", + "schema": { + "attributes": [ + { + "default": 0.01, + "name": "negative_slope" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.instance_norm", + "schema": { + "attributes": [ + { + "name": "use_input_stats", + "type": "boolean" + }, + { + "name": "momentum", + "type": "float32" + }, + { + "name": "eps", + "type": "float32" + }, + { + "name": "cudnn_enabled", + "type": "boolean" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "input" + }, + { + "name": "weight", + "option": "optional" + }, + { + "name": "bias", + "option": "optional" + }, + { + "name": "running_mean", + "option": "optional" + }, + { + "name": "running_var", + "option": "optional" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.reflection_pad1d", + "schema": { + "attributes": [ + { + "name": "padding", + "type": "int64[]" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.replication_pad1d", + "schema": { + "attributes": [ + { + "name": "padding", + "type": "int64[]" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.replication_pad2d", + "schema": { + "attributes": [ + { + "name": "padding", + "type": "int64[]" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": 
"torch.replication_pad3d", + "schema": { + "attributes": [ + { + "name": "padding", + "type": "int64[]" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.reflection_pad2d", + "schema": { + "attributes": [ + { + "name": "padding", + "type": "int64[]" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.reflection_pad3d", + "schema": { + "attributes": [ + { + "name": "padding", + "type": "int64[]" + } + ], + "category": "Tensor", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.rnn_relu:input", + "schema": { + "attributes": [ + { + "name": "has_biases", + "type": "boolean" + }, + { + "name": "num_layers", + "type": "int64" + }, + { + "name": "dropout", + "type": "float32" + }, + { + "name": "train", + "type": "boolean" + }, + { + "name": "bidirectional", + "type": "boolean" + }, + { + "name": "batch_first", + "type": "boolean" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "hx" + }, + { + "name": "params", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" + } + ] + } + }, + { + "name": "torch.rnn_tanh:input", + "schema": { + "attributes": [ + { + "name": "has_biases", + "type": "boolean" + }, + { + "name": "num_layers", + "type": "int64" + }, + { + "name": "dropout", + "type": "float32" + }, + { + "name": "train", + "type": "boolean" + }, + { + "name": "bidirectional", + "type": "boolean" + }, + { + "name": "batch_first", + "type": "boolean" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + }, + { + "name": "hx" + }, + { + "name": "params", + "type": "T[]" + } + ], + "outputs": [ + { + "name": "output" + }, + { + "name": "?" 
+ } + ] + } + }, + { + "name": "torch.detach", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.upsample_bilinear2d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]" + }, + { + "name": "align_corners", + "type": "boolean" + }, + { + "name": "scales_h", + "type": "float32" + }, + { + "name": "scales_w", + "type": "float32" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.upsample_nearest2d", + "schema": { + "attributes": [ + { + "name": "output_size", + "type": "int64[]" + }, + { + "name": "scales_h", + "type": "float32" + }, + { + "name": "scales_w", + "type": "float32" + } + ], + "category": "Layer", + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.size", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output", + "type": "int64" + } + ] + } + }, + { + "name": "torch.exp", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.reciprocal", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.ones_like", + "schema": { + "attributes": [ + { + "name": "dtype" + }, + { + "name": "layout" + }, + { + "name": "device" + }, + { + "name": "pin_memory", + "type": "boolean" + }, + { + "name": "memory_format" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.arange", + "schema": { + "attributes": [ + { + "name": "dtype" + }, + { + "name": "layout" + }, + { + "name": "device" + }, + { + "name": "pin_memory", + "type": "boolean" + } + ], + 
"inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.sqrt", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.erf", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.argmin", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + }, + { + "default": false, + "name": "keepdim", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.argmax", + "schema": { + "attributes": [ + { + "name": "dim", + "type": "int64" + }, + { + "default": false, + "name": "keepdim", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + }, + { + "name": "torch.log", + "schema": { + "inputs": [ + { + "name": "input" + } + ], + "outputs": [ + { + "name": "output" + } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/pytorch.js b/frontend/packages/core/public/netron/pytorch.js new file mode 100644 index 00000000..0f2898db --- /dev/null +++ b/frontend/packages/core/public/netron/pytorch.js @@ -0,0 +1,3222 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var pytorch = pytorch || {}; +var base = base || require('./base'); +var long = long || { Long: require('long') }; + +pytorch.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if ([ 'pth', 'pt', 'pt1', 'pkl', 'bin', 'model', 'h5', 'pb', 't7', 'dms', 'ckpt', 'zip' ].indexOf(extension) !== -1 || identifier.toLowerCase().endsWith('.tar')) { + if (pytorch.Container.open(context)) { + return true; + } + } + return false; + } + + open(context, host) { + const 
identifier = context.identifier; + return host.require('./pickle').then((pickle) => { + return host.require('./python').then((python) => { + return pytorch.Metadata.open(host).then((metadata) => { + try { + const container = pytorch.Container.open(context, metadata, pickle, python, (error, fatal) => { + const message = error && error.message ? error.message : error.toString(); + host.exception(new pytorch.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."), fatal); + }); + return new pytorch.Model(metadata, container); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new pytorch.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + }); + } +}; + +pytorch.Model = class { + + constructor(metadata, container) { + this._format = container.format; + this._producer = container.producer || ''; + this._graphs = [ new pytorch.Graph(metadata, container) ]; + } + + get format() { + return this._format; + } + + get graphs() { + return this._graphs; + } +}; + +pytorch.Graph = class { + + constructor(metadata, container) { + this._nodes = []; + this._inputs = []; + this._outputs = []; + this._groups = true; + this._littleEndian = container.littleEndian; + + if (container.format.startsWith('TorchScript ')) { + this._name = container.name; + const traced = container.trace(); + const initializers = new Map(); + if (container.data) { + const queue = [ container.data ]; + while (queue.length > 0) { + const module = queue.shift(); + for (const key of Object.keys(module)) { + if (key !== '__module__' && key !== '__name__' && key !== '__parent__') { + const obj = module[key]; + if (!Array.isArray(obj) && obj === Object(obj)) { + if (pytorch.Utility.isTensor(obj)) { + const parameter = obj; + parameter.__parent__ = module; + if (!parameter.initializer && parameter.storage) { + parameter.initializer = new pytorch.Tensor(parameter.name, parameter, true); + } + 
if (parameter.__variable__ && parameter.__count__ === 1) { + initializers.set(parameter.__variable__, parameter); + } + } + else if (obj && obj.__module__ && obj.__name__) { + obj.__parent__ = module; + if (!obj.__id__) { + obj.__id__ = key; + } + queue.push(obj); + } + } + } + } + } + } + + if (traced) { + if (container.inputs) { + for (const input of container.inputs) { + this._inputs.push(new pytorch.Parameter(input, true, [ + new pytorch.Argument(input, null, null) + ])); + } + } + if (container.outputs) { + for (const output of container.outputs) { + this._outputs.push(new pytorch.Parameter(output, true, [ + new pytorch.Argument(output, null, null) + ])); + } + } + if (container.nodes) { + for (const node of container.nodes) { + const item = { + type: node.type, + node: node + }; + this._nodes.push(new pytorch.Node(metadata, '', item, initializers)); + } + } + } + + if (container.data) { + this._loadScriptModule(metadata, container, container.data, initializers); + } + } + else if (container.data) { + const data = container.data; + this._type = (data.__module__ && data.__name__) ? (data.__module__ + '.' 
+ data.__name__) : ''; + const input = 'data'; + this._inputs.push(new pytorch.Parameter(input, true, [ new pytorch.Argument(input, null, null) ])); + const outputs = this._loadModule(metadata, container.data, [], [ input ]); + for (const output of outputs) { + this._outputs.push(new pytorch.Parameter(output, true, [ new pytorch.Argument(output, null, null) ])); + } + } + else if (container.state) { + for (const state_group of container.state) { + const attributes = state_group.attributes || []; + const inputs = state_group.states.map((state) => { + const tensor = new pytorch.Tensor(state.id, state.value, this._littleEndian); + const visible = state_group.states.length === 0 || tensor.type.toString() !== 'int64' || tensor.value < 1000; + return new pytorch.Parameter(state.name, visible, [ + new pytorch.Argument(state.id, null, tensor) + ]); + }); + const obj = { + name: state_group.name, + type: 'torch.nn.Module', + attributes: attributes, + inputs: inputs, + outputs: [] + }; + this._nodes.push(new pytorch.Node(metadata, '', obj, null)); + } + } + } + + _loadModule(metadata, parent, groups, inputs) { + + if (parent.__module__ && + !parent.__module__ === 'torch.nn.modules.container' && + (!parent._modules || parent._modules.length == 0)) { + this._createNode(groups, '', parent, inputs); + return []; + } + + if (!parent._modules) { + throw new pytorch.Error('Module does not contain modules.'); + } + + for (const module of parent._modules) { + const key = module[0]; + const value = module[1]; + if (module && value) { + const type = value.__module__ + '.' 
+ value.__name__; + switch (type) { + case 'torch.nn.modules.container.Sequential': + groups.push(key); + inputs = this._loadModule(metadata, value, groups, inputs); + groups.pop(key); + break; + case 'torchvision.models.densenet._Transition': + case 'torchvision.models.resnet.Bottleneck': + case 'torchvision.models.densenet._DenseBlock': + case 'torchvision.models.densenet._DenseLayer': + case 'torchvision.models.inception.BasicConv2d': + case 'torchvision.models.inception.InceptionAux': + case 'torchvision.models.inception.InceptionA': + case 'torchvision.models.inception.InceptionB': + case 'torchvision.models.inception.InceptionC': + case 'torchvision.models.inception.InceptionD': + case 'torchvision.models.inception.InceptionE': { + groups.push(key); + const node = this._createNode(metadata, groups, key, value, inputs, this._littleEndian); + inputs = [ node.name ]; + groups.pop(key); + break; + } + default: { + const node = this._createNode(metadata, groups, key, value, inputs); + inputs = [ node.name ]; + break; + } + } + } + } + return inputs; + } + + _createNode(metadata, groups, key, obj, args) { + + const type = obj.__module__ + '.' + obj.__name__; + const schema = metadata.type(type); + + let inputSchema = [ { name: 'input'} ]; + if (schema && schema.inputs && schema.inputs.length > 0) { + inputSchema = schema.inputs.slice(); + } + + let inputs = []; + inputs.push(new pytorch.Parameter(inputSchema.shift().name, true, args.map((argument) => { + return new pytorch.Argument(argument, null, null); + }))); + + const parameters = obj._parameters || obj._buffers || []; + for (const parameter of parameters) { + const key = parameter[0]; + const value = parameter[1]; + let visible = true; + let inputName = ''; + if (inputSchema.length > 0) { + const input = inputSchema.shift(); + inputName = input.name; + visible = input.visible === false ? 
false : true; + } + if (parameter && value && (value.data || value.storage)) { + let initializer = null; + if (value.data) { + initializer = new pytorch.Tensor('', value.data, this._littleEndian); + } + else if (value.storage) { + initializer = new pytorch.Tensor('', value, this._littleEndian); + } + inputs.push(new pytorch.Parameter(inputName || key, visible, [ new pytorch.Argument('', null, initializer) ])); + } + } + + const group = groups.join('/'); + const name = group ? (group + '/' + key) : key; + + const outputs = [ new pytorch.Parameter('output', true, [ new pytorch.Argument(name, null, null) ]) ]; + + const attributes = []; + for (const name of Object.keys(obj)) { + if (!name.startsWith('_')) { + attributes.push({ name: name, value: obj[name] }); + } + } + const item = { + name: name, + type: type, + attributes: attributes, + inputs: inputs, + outputs: outputs + }; + const node = new pytorch.Node(metadata, group, item, {}); + this._nodes.push(node); + return node; + } + + _loadScriptModule(metadata, container, module, initializers) { + if (module) { + if (pytorch.Graph._getParameters(module).length > 0 && !module.__hide__) { + const item = { module: module }; + this._nodes.push(new pytorch.Node(metadata, '', item, initializers)); + } + const submodules = pytorch.Graph._getSubmodules(module); + for (const submodule of submodules) { + this._loadScriptModule(metadata, container, submodule, initializers); + } + } + } + + static _getParameters(module) { + const parameters = []; + if (module && module.__module__ && module.__name__) { + for (const key of Object.keys(module)) { + if (pytorch.Utility.isTensor(module[key])) { + const parameter = module[key]; + parameter.__id__ = key; + parameters.push(parameter); + } + } + } + return parameters; + } + + static _getSubmodules(module) { + const submodules = []; + if (module && module.__module__ && module.__name__) { + for (const key of Object.keys(module)) { + if (!key.startsWith('__')) { + const value = module[key]; 
+ if (value && value.__module__ && value.__name__ && !pytorch.Utility.isTensor(value)) { + submodules.push(value); + } + } + } + } + return submodules; + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get groups() { + return this._groups; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +pytorch.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +pytorch.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new pytorch.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type; + this._initializer = initializer; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +pytorch.Node = class { + + constructor(metadata, group, item, initializers) { + this._metadata = metadata; + this._group = group || ''; + this._name = item.name || ''; + + if (!item.module && !item.node) { + this._type = item.type; + this._inputs = item.inputs; + this._outputs = item.outputs; + this._attributes = item.attributes.map((attribute) => { + const schema = metadata.attribute(this._type, attribute.name); + return new pytorch.Attribute(schema, attribute.name, attribute.value); + }); + } + else { + this._attributes = []; + this._inputs = []; + this._outputs = []; + + let module = item.module; + if (module) { + this._type = 'torch.nn.modules.module.Module'; + for (const parameter of pytorch.Graph._getParameters(module)) { + this._inputs.push(new 
pytorch.Parameter(parameter.__id__, true, [ + new pytorch.Argument('', null, parameter.initializer || null) + ])); + if (parameter.__variable__) { + this._outputs.push(new pytorch.Parameter(parameter.__id__, true, [ + new pytorch.Argument(parameter.__variable__, null, null) + ])); + } + } + } + + if (item.node) { + this._type = item.type; + const schema = metadata.type(this._type); + module = null; + let match = true; + let count = 0; + for (const input of item.node.inputs) { + for (const argument of input) { + const parameter = initializers.get(argument.id); + if (parameter) { + if (parameter.__parent__ && (module == null || module == parameter.__parent__)) { + module = parameter.__parent__; + count++; + } + else { + match = false; + break; + } + } + } + if (!match) { + break; + } + } + if (module) { + const params = pytorch.Graph._getParameters(module).filter((p) => p.__id__ !== 'num_batches_tracked'); + if (params.length == count && match) { + module.__hide__ = true; + for (const input of item.node.inputs) { + for (const argument of input) { + const parameter = initializers.get(argument.id); + if (parameter && parameter.initializer) { + argument.initializer = parameter.initializer; + } + } + } + } + else { + module = null; + } + } + + for (let inputIndex = 0; inputIndex < item.node.inputs.length; inputIndex++) { + let inputName = inputIndex.toString(); + if (schema && schema.inputs && schema.inputs.length > inputIndex) { + inputName = schema.inputs[inputIndex].name; + } + this._inputs.push(new pytorch.Parameter(inputName, true, + item.node.inputs[inputIndex].map((input) => new pytorch.Argument(input.id, null, input.initializer || null)) + )); + } + + for (let outputIndex = 0; outputIndex < item.node.outputs.length; outputIndex++) { + let outputName = outputIndex.toString(); + if (schema && schema.outputs && schema.outputs.length > outputIndex) { + outputName = schema.outputs[outputIndex].name; + } + this._outputs.push(new pytorch.Parameter(outputName, true, [ + 
new pytorch.Argument(item.node.outputs[outputIndex], null, null) + ])); + } + + for (let i = 0; i < item.node.attributes.length; i++) { + let attributeSchema = null; + let name = i.toString(); + let value = item.node.attributes[i]; + if (value && value.type === '=' && value.target.type == 'id') { + name = value.target.value; + value = value.expression; + attributeSchema = metadata.attribute(this._type, name); + } + else if (schema && schema.attributes && schema.attributes.length > i) { + attributeSchema = schema.attributes[i]; + name = attributeSchema.name; + } + this._attributes.push(new pytorch.Attribute(attributeSchema, name, value)); + } + } + if (module) { + if (module.__id__) { + let current = module; + this._name = current.__id__; + while (current.__parent__ != null) { + current = current.__parent__; + if (!current.__parent__ && !current.__id__) { + break; + } + this._name = [ current.__id__, this._name ].join('.'); + } + } + } + } + } + + get name() { + return this._name; + } + + get group() { + return this._group; + } + + get type() { + const index = this._type.indexOf(':'); + return index === -1 ? 
this._type : this._type.substring(0, index); + } + + get metadata() { + return this._metadata.type(this._type); + } + + get function() { + return this._type.startsWith('torch.nn.modules.') && this._type !== 'torch.nn.modules.module.Module'; + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } +}; + +pytorch.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + + if (this._name === 'training') { + this._visible = false; + this._type = 'boolean'; + return; + } + + if (value && value.type) { + switch (value.type) { + case 'number': + this._value = value.value; + break; + case 'string': + this._value = value.value; + break; + case 'boolean': + this._value = value.value; + break; + case 'id': + this._value = value.value; + break; + } + } + + if (schema) { + if (Object.prototype.hasOwnProperty.call(schema, 'type')) { + this._type = schema.type; + } + + switch (this._type) { + case 'boolean': + if (this._value == 'False') { + this._value = false; + } + else if (this._value == 'True') { + this._value = true; + } + break; + case 'int32': + case 'int64': + if (typeof this._value !== 'number') { + if (typeof this._value === 'string') { + this._value = parseInt(this._value, 10); + } + } + break; + case 'float32': + case 'float64': + if (typeof this._value !== 'number') { + if (typeof this._value === 'string') { + this._value = parseFloat(this._value); + } + } + break; + case 'int32[]': + case 'int64[]': { + switch (this._value.type) { + case 'list': + this._value = this._value.value.map((item) => { + if (item.type === 'number') { + const number = parseInt(item.value, 10); + if (!Number.isNaN(item.value - number)) { + return number; + } + } + return item; + }); + break; + } + break; + } + } + + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if 
(Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (JSON.stringify(schema.default) == JSON.stringify(this._value)) { + this._visible = false; + } + else if (Array.isArray(this._value) && + !Array.isArray(schema.default) && + this.value.every((item) => item == schema.default)) { + this._visible = false; + } + } + } + + if (Array.isArray(value) && value.length > 0 && value.every((obj) => obj && obj.__module__ && obj.__module__.startsWith('torch.nn'))) { + this._value = '?'; + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +pytorch.Tensor = class { + + constructor(name, tensor, littleEndian) { + this._name = name || ''; + this._type = new pytorch.TensorType(tensor.storage.dataType, new pytorch.TensorShape(tensor.size)); + this._data = tensor.storage.data; + this._littleEndian = littleEndian; + } + + get kind() { + return 'Tensor'; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return pytorch.Tensor._stringify(value, '', ' '); + } + + _context() { + const context = {}; + context.state = null; + context.index = 0; + context.count = 0; + + if (!this._type.dataType) { + context.state = 'Tensor has no data type.'; + return context; + } + switch (this._type.dataType) { + case 'uint8': + case 'qint8': + case 'int8': + case 'int16': + case 'int32': + case 'int64': + case 'float16': + case 'float32': + case 'float64': + break; + default: + context.state = "Tensor data type '" 
+ this._type.dataType + "' is not supported."; + return context; + } + if (!this._type.shape) { + context.state = 'Tensor has no dimensions.'; + return context; + } + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + + context.data = this._data; + context.dataType = this._type.dataType; + context.dimensions = this._type.shape.dimensions; + context.dataView = new DataView(context.data.buffer, context.data.byteOffset, context.data.byteLength); + return context; + } + + _decode(context, dimension) { + const results = []; + const dimensions = (context.dimensions.length == 0) ? [ 1 ] : context.dimensions; + const size = dimensions[dimension]; + if (dimension == dimensions.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (context.dataType) { + case 'uint8': + results.push(context.dataView.getUint8(context.index, this._littleEndian)); + context.index++; + context.count++; + break; + case 'qint8': + case 'int8': + results.push(context.dataView.getInt8(context.index, this._littleEndian)); + context.index++; + context.count++; + break; + case 'int16': + results.push(context.dataView.getInt16(context.index, this._littleEndian)); + context.index += 2; + context.count++; + break; + case 'int32': + results.push(context.dataView.getInt32(context.index, this._littleEndian)); + context.index += 4; + context.count++; + break; + case 'int64': + results.push(new long.Long(context.dataView.getUint32(context.index, true), context.dataView.getUint32(context.index + 4, true), false)); + context.index += 8; + context.count++; + break; + case 'float16': + results.push(context.dataView.getFloat16(context.index, this._littleEndian)); + context.index += 2; + context.count++; + break; + case 'float32': + results.push(context.dataView.getFloat32(context.index, this._littleEndian)); + context.index += 4; + context.count++; + break; + case 'float64': + 
results.push(context.dataView.getFloat64(context.index, this._littleEndian)); + context.index += 8; + context.count++; + break; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.dimensions.length == 0) { + return results[0]; + } + return results; + } + + static _stringify(value, indentation, indent) { + if (Array.isArray(value)) { + const result = []; + result.push(indentation + '['); + const items = value.map((item) => pytorch.Tensor._stringify(item, indentation + indent, indent)); + if (items.length > 0) { + result.push(items.join(',\n')); + } + result.push(indentation + ']'); + return result.join('\n'); + } + if (value && long.Long.isLong(value)) { + return indentation + value.toString(); + } + if (typeof value == 'string') { + return indentation + value; + } + if (value == Infinity) { + return indentation + 'Infinity'; + } + if (value == -Infinity) { + return indentation + '-Infinity'; + } + if (isNaN(value)) { + return indentation + 'NaN'; + } + return indentation + value.toString(); + } +}; + +pytorch.TensorType = class { + + constructor(dataType, shape) { + this._dataType = dataType; + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +pytorch.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions || []; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (this._dimensions && this._dimensions.length > 0) { + return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']'; + } + return ''; + } +}; + +pytorch.Metadata = class { + + static open(host) { + if (pytorch.Metadata._metadata) { + return Promise.resolve(pytorch.Metadata._metadata); + } + else { + return 
host.request(null, 'pytorch-metadata.json', 'utf-8').then((data) => { + pytorch.Metadata._metadata = new pytorch.Metadata(data); + return pytorch.Metadata._metadata; + }).catch(() => { + pytorch.Metadata._metadata = new pytorch.Metadata(null); + return pytorch.Metadata._metadata; + }); + } + } + + constructor(data) { + this._map = new Map(); + this._attributeCache = new Map(); + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map.set(item.name, item.schema); + } + const index = item.name.indexOf(':'); + if (index !== -1) { + const name = item.name.substring(0, index); + if (!this._map.has(name)) { + this._map.set(name, []); + } + this._map.get(name).push(item.name); + } + } + } + } + } + + type(name) { + const schema = this._map.get(name); + if (schema) { + return Array.isArray(schema) ? schema.map((name) => this._map.get(name)) : schema; + } + return null; + } + + attribute(type, name) { + const attributeName = type + ':' + name; + if (!this._attributeCache.has(attributeName)) { + this._attributeCache.set(attributeName, null); + const schema = this.type(type); + if (schema && schema.attributes) { + for (const attribute of schema.attributes) { + this._attributeCache.set(type + ':' + attribute.name, attribute); + } + } + } + return this._attributeCache.get(attributeName); + } +}; + +pytorch.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'Error loading PyTorch model.'; + } +}; + +pytorch.Execution = class { + + constructor(python, sources, exceptionCallback) { + const self = this; + this._python = python; + this._sources = sources; + this._exceptionCallback = exceptionCallback; + this._utf8Decoder = new TextDecoder('utf-8'); + this._unknownNameMap = new Set(); + this._knownPackageMap = new Set([ 'torch', 'torchvision', 'collections', '__builtin__', '_codecs', 'argparse', 'numpy' ]); + this._packages = new Map(); 
+ this._context = new pytorch.Execution.Context(); + this._context.scope.builtins = {}; + this._context.scope.builtins.type = { __module__: 'builtins', __name__: 'type' }; + this._context.scope.builtins.module = { __module__: 'builtins', __name__: 'module', __class__: this._context.scope.builtins.type }; + this._context.scope.builtins.function = { __module__: 'builtins', __name__: 'function', __class__: this._context.scope.builtins.type }; + this._context.scope.builtins.method = { __module__: 'builtins', __name__: 'method', __class__: this._context.scope.builtins.type }; + this._context.scope.builtins.dict = { __module__: 'builtins', __name__: 'dict', __class__: this._context.scope.builtins.type }; + this._context.scope.builtins.list = { __module__: 'builtins', __name__: 'list', __class__: this._context.scope.builtins.type }; + this._context.scope.builtins.str = { __module__: 'builtins', __name__: 'str', __class__: this._context.scope.builtins.type }; + this._context.scope.builtins.tuple = { __module__: 'builtins', __name__: 'tuple', __class__: this._context.scope.builtins.type }; + this._context.scope.typing = { __name__: 'typing', __class__: this._context.scope.builtins.module }; + this._context.scope.typing._GenericAlias = { __module__: 'typing', __name__: '_GenericAlias', __class__: this._context.scope.builtins.type }; + this._context.scope.typing._SpecialForm = { __module__: 'typing', __name__: '_SpecialForm', __class__: this._context.scope.builtins.type }; + this._context.scope.typing._VariadicGenericAlias = { __module__: 'typing', __name__: '_VariadicGenericAlias', __class__: this._context.scope.builtins.type }; + this._context.scope.typing.Dict = { __module__: 'typing', __name__: 'Dict', __class__: this._context.scope.typing._VariadicGenericAlias, __origin__: this._context.scope.builtins.dict }; + this._context.scope.typing.List = { __module__: 'typing', __name__: 'List', __class__: this._context.scope.typing._GenericAlias, __origin__: 
this._context.scope.builtins.list }; + this._context.scope.typing.Optional = { __module__: 'typing', __class__: this._context.scope.typing._SpecialForm }; + this._context.scope.typing.Tuple = { __module__: 'typing', __name__: 'Tuple', __class__: this._context.scope.typing._GenericAlias, __origin__: this._context.scope.builtins.tuple }; + this._context.scope.torch = { __name__: 'torch', __class__: this._context.scope.builtins.module }; + this._context.scope.torch.Tensor = { __module__: 'torch', __name__: 'Tensor', __class__: this._context.scope.builtins.type }; + this._registerConstructor('argparse.Namespace', function (args) { + this.args = args; + }); + this._registerConstructor('torch.autograd.variable.Variable', function() {}); + this._registerConstructor('torch.backends.cudnn.rnn.Unserializable', function() {}); + this._registerConstructor('torch.device', function(type, index) { + this.type = type; + this.index = index; + }); + this._registerConstructor('torch.distributions.multivariate_normal.MultivariateNormal', function() {}); + this._registerConstructor('torch.nn.backends.thnn._get_thnn_function_backend', function() {}); + this._registerConstructor('torch.nn.intrinsic.modules.fused.ConvReLU2d', function() {}); + this._registerConstructor('torch.nn.modules.activation.CELU', function() {}); + this._registerConstructor('torch.nn.modules.activation.ELU', function() {}); + this._registerConstructor('torch.nn.modules.activation.GELU', function() {}); + this._registerConstructor('torch.nn.modules.activation.GLU', function() {}); + this._registerConstructor('torch.nn.modules.activation.Hardtanh', function() {}); + this._registerConstructor('torch.nn.modules.activation.LeakyReLU', function() {}); + this._registerConstructor('torch.nn.modules.activation.LogSigmoid', function() {}); + this._registerConstructor('torch.nn.modules.activation.LogSoftmax', function() {}); + this._registerConstructor('torch.nn.modules.activation.MultiheadAttention', function() {}); + 
this._registerConstructor('torch.nn.modules.activation.ReLU', function() {}); + this._registerConstructor('torch.nn.modules.activation.ReLU6', function() {}); + this._registerConstructor('torch.nn.modules.activation.PReLU', function() {}); + this._registerConstructor('torch.nn.modules.activation.RReLU', function() {}); + this._registerConstructor('torch.nn.modules.activation.SELU', function() {}); + this._registerConstructor('torch.nn.modules.activation.Sigmoid', function() {}); + this._registerConstructor('torch.nn.modules.activation.Softmax', function() {}); + this._registerConstructor('torch.nn.modules.activation.Softmax2d', function() {}); + this._registerConstructor('torch.nn.modules.activation.Softplus', function() {}); + this._registerConstructor('torch.nn.modules.activation.Tanh', function() {}); + this._registerConstructor('torch.nn.modules.activation.Threshold', function() {}); + this._registerConstructor('torch.nn.modules.batchnorm.BatchNorm1d', function() {}); + this._registerConstructor('torch.nn.modules.batchnorm.BatchNorm2d', function() {}); + this._registerConstructor('torch.nn.modules.batchnorm.BatchNorm3d', function() {}); + this._registerConstructor('torch.nn.modules.batchnorm.SyncBatchNorm', function() {}); + this._registerConstructor('torch.nn.modules.container.ModuleDict', function() {}); + this._registerConstructor('torch.nn.modules.container.ModuleList', function() {}); + this._registerConstructor('torch.nn.modules.container.ParameterList', function() {}); + this._registerConstructor('torch.nn.modules.container.Sequential', function() {}); + this._registerConstructor('torch.nn.modules.conv.Conv1d', function() {}); + this._registerConstructor('torch.nn.modules.conv.Conv2d', function() {}); + this._registerConstructor('torch.nn.modules.conv.Conv3d', function() {}); + this._registerConstructor('torch.nn.modules.conv.ConvTranspose1d', function() {}); + this._registerConstructor('torch.nn.modules.conv.ConvTranspose2d', function() {}); + 
this._registerConstructor('torch.nn.modules.conv.ConvTranspose3d', function() {}); + this._registerConstructor('torch.nn.modules.distance.CosineSimilarity', function() {}); + this._registerConstructor('torch.nn.modules.dropout.Dropout', function() {}); + this._registerConstructor('torch.nn.modules.dropout.Dropout2d', function() {}); + this._registerConstructor('torch.nn.modules.dropout.Dropout3d', function() {}); + this._registerConstructor('torch.nn.modules.fold.Unfold', function() {}); + this._registerConstructor('torch.nn.modules.flatten.Flatten', function() {}); + this._registerConstructor('torch.nn.modules.instancenorm.InstanceNorm1d', function() {}); + this._registerConstructor('torch.nn.modules.instancenorm.InstanceNorm2d', function() {}); + this._registerConstructor('torch.nn.modules.instancenorm.InstanceNorm3d', function() {}); + this._registerConstructor('torch.nn.modules.linear.Linear', function() {}); + this._registerConstructor('torch.nn.modules.linear.Identity', function() {}); + this._registerConstructor('torch.nn.modules.loss.BCELoss', function() {}); + this._registerConstructor('torch.nn.modules.loss.BCEWithLogitsLoss', function() {}); + this._registerConstructor('torch.nn.modules.loss.CrossEntropyLoss', function() {}); + this._registerConstructor('torch.nn.modules.loss.L1Loss', function() {}); + this._registerConstructor('torch.nn.modules.loss.MSELoss', function() {}); + this._registerConstructor('torch.nn.modules.loss.NLLLoss', function() {}); + this._registerConstructor('torch.nn.modules.loss.SmoothL1Loss', function() {}); + this._registerConstructor('torch.nn.modules.normalization.CrossMapLRN2d', function() {}); + this._registerConstructor('torch.nn.modules.normalization.GroupNorm', function() {}); + this._registerConstructor('torch.nn.modules.normalization.LayerNorm', function() {}); + this._registerConstructor('torch.nn.modules.normalization.LocalResponseNorm', function() {}); + 
this._registerConstructor('torch.nn.modules.padding.ReflectionPad1d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ReflectionPad2d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ReplicationPad1d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ReplicationPad2d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ReplicationPad3d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ZeroPad2d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ConstantPad1d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ConstantPad2d', function() {}); + this._registerConstructor('torch.nn.modules.padding.ConstantPad3d', function() {}); + this._registerConstructor('torch.nn.modules.pixelshuffle.PixelShuffle', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AdaptiveAvgPool1d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AdaptiveAvgPool2d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AdaptiveAvgPool3d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AdaptiveMaxPool1d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AdaptiveMaxPool2d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AdaptiveMaxPool3d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AvgPool1d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AvgPool2d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.AvgPool3d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.FractionalMaxPool2d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.MaxPool1d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.MaxPool2d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.MaxPool3d', function() {}); + 
this._registerConstructor('torch.nn.modules.pooling.MaxUnpool1d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.MaxUnpool2d', function() {}); + this._registerConstructor('torch.nn.modules.pooling.MaxUnpool3d', function() {}); + this._registerConstructor('torch.nn.modules.rnn.GRU', function() {}); + this._registerConstructor('torch.nn.modules.rnn.GRUCell', function() {}); + this._registerConstructor('torch.nn.modules.rnn.LSTM', function() {}); + this._registerConstructor('torch.nn.modules.rnn.LSTMCell', function() {}); + this._registerConstructor('torch.nn.modules.rnn.RNN', function() {}); + this._registerConstructor('torch.nn.modules.sparse.Embedding', function() {}); + this._registerConstructor('torch.nn.modules.sparse.EmbeddingBag', function() {}); + this._registerConstructor('torch.nn.modules.transformer.TransformerEncoder', function() {}); + this._registerConstructor('torch.nn.modules.transformer.TransformerEncoderLayer', function() {}); + this._registerConstructor('torch.nn.modules.upsampling.Upsample', function() {}); + this._registerConstructor('torch.nn.modules.upsampling.UpsamplingBilinear2d', function() {}); + this._registerConstructor('torch.nn.modules.upsampling.UpsamplingNearest2d', function() {}); + this._registerConstructor('torch.nn.parallel.data_parallel.DataParallel', function() {}); + this._registerConstructor('torch.nn.parallel.distributed.DistributedDataParallel', function() {}); + this._registerConstructor('torch.nn.parameter.Parameter', function(data, requires_grad) { + this.data = data; + this.requires_grad = requires_grad; + }); + this._registerConstructor('torch.nn.quantized.modules.functional_modules.FloatFunctional', function() {}); + this._registerConstructor('torch.nn.utils.spectral_norm.SpectralNorm', function() {}); + this._registerConstructor('torch.nn.utils.spectral_norm.SpectralNormStateDictHook', function() {}); + this._registerConstructor('torch.nn.utils.spectral_norm.SpectralNormLoadStateDictPreHook', 
function() {}); + this._registerConstructor('torch.nn.utils.weight_norm.WeightNorm', function() {}); + this._registerConstructor('torch.optim.adam.Adam', function() {}); + this._registerConstructor('torch.optim.adagrad.Adagrad', function() {}); + this._registerConstructor('torch.optim.lr_scheduler.MultiStepLR', function() {}); + this._registerConstructor('torch.optim.lr_scheduler.StepLR', function() {}); + this._registerConstructor('torch.optim.rmsprop.RMSprop', function() {}); + this._registerConstructor('torch.optim.sgd.SGD', function() {}); + this._registerConstructor('torch.quantization.stubs.DeQuantStub', function() {}); + this._registerConstructor('torch.quantization.stubs.QuantStub', function() {}); + this._registerConstructor('torchvision.datasets.folder.ImageFolder', function() {}); + this._registerConstructor('torchvision.models.alexnet.AlexNet', function() {}); + this._registerConstructor('torchvision.models.densenet.DenseNet', function() {}); + this._registerConstructor('torchvision.models.densenet._DenseBlock', function() {}); + this._registerConstructor('torchvision.models.densenet._DenseLayer', function() {}); + this._registerConstructor('torchvision.models.densenet._Transition', function() {}); + this._registerConstructor('torchvision.models.detection._utils.BalancedPositiveNegativeSampler', function() {}); + this._registerConstructor('torchvision.models.detection._utils.BoxCoder', function() {}); + this._registerConstructor('torchvision.models.detection._utils.Matcher', function() {}); + this._registerConstructor('torchvision.models.detection.backbone_utils.BackboneWithFPN', function() {}); + this._registerConstructor('torchvision.models.detection.faster_rcnn.FasterRCNN', function() {}); + this._registerConstructor('torchvision.models.detection.faster_rcnn.FastRCNNPredictor', function() {}); + this._registerConstructor('torchvision.models.detection.faster_rcnn.TwoMLPHead', function() {}); + 
this._registerConstructor('torchvision.models.detection.keypoint_rcnn.KeypointRCNN', function() {}); + this._registerConstructor('torchvision.models.detection.keypoint_rcnn.KeypointRCNNHeads', function() {}); + this._registerConstructor('torchvision.models.detection.keypoint_rcnn.KeypointRCNNPredictor', function() {}); + this._registerConstructor('torchvision.models.detection.mask_rcnn.MaskRCNN', function() {}); + this._registerConstructor('torchvision.models.detection.mask_rcnn.MaskRCNNHeads', function() {}); + this._registerConstructor('torchvision.models.detection.mask_rcnn.MaskRCNNPredictor', function() {}); + this._registerConstructor('torchvision.models.detection.roi_heads.RoIHeads', function() {}); + this._registerConstructor('torchvision.models.detection.rpn.AnchorGenerator', function() {}); + this._registerConstructor('torchvision.models.detection.rpn.RegionProposalNetwork', function() {}); + this._registerConstructor('torchvision.models.detection.rpn.RPNHead', function() {}); + this._registerConstructor('torchvision.models.detection.transform.GeneralizedRCNNTransform', function() {}); + this._registerConstructor('torchvision.models.googlenet.BasicConv2d', function() {}); + this._registerConstructor('torchvision.models.googlenet.GoogLeNet', function() {}); + this._registerConstructor('torchvision.models.googlenet.Inception', function() {}); + this._registerConstructor('torchvision.models.inception.BasicConv2d', function() {}); + this._registerConstructor('torchvision.models.inception.Inception3', function() {}); + this._registerConstructor('torchvision.models.inception.InceptionAux', function() {}); + this._registerConstructor('torchvision.models.inception.InceptionA', function() {}); + this._registerConstructor('torchvision.models.inception.InceptionB', function() {}); + this._registerConstructor('torchvision.models.inception.InceptionC', function() {}); + this._registerConstructor('torchvision.models.inception.InceptionD', function() {}); + 
this._registerConstructor('torchvision.models.inception.InceptionE', function() {}); + this._registerConstructor('torchvision.models.mobilenet.ConvBNReLU', function() {}); + this._registerConstructor('torchvision.models.mobilenet.MobileNetV2', function() {}); + this._registerConstructor('torchvision.models.mobilenet.InvertedResidual', function() {}); + this._registerConstructor('torchvision.models.resnet.Bottleneck', function() {}); + this._registerConstructor('torchvision.models.resnet.BasicBlock', function() {}); + this._registerConstructor('torchvision.models.quantization.resnet.QuantizableBottleneck', function() {}); + this._registerConstructor('torchvision.models.quantization.resnet.QuantizableResNet', function() {}); + this._registerConstructor('torchvision.models.segmentation.deeplabv3.ASPP', function() {}); + this._registerConstructor('torchvision.models.segmentation.deeplabv3.ASPPConv', function() {}); + this._registerConstructor('torchvision.models.segmentation.deeplabv3.ASPPPooling', function() {}); + this._registerConstructor('torchvision.models.segmentation.deeplabv3.DeepLabHead', function() {}); + this._registerConstructor('torchvision.models.segmentation.deeplabv3.DeepLabV3', function() {}); + this._registerConstructor('torchvision.models.segmentation.fcn.FCN', function() {}); + this._registerConstructor('torchvision.models.segmentation.fcn.FCNHead', function() {}); + this._registerConstructor('torchvision.models.shufflenetv2.ShuffleNetV2', function() {}); + this._registerConstructor('torchvision.models.shufflenetv2.InvertedResidual', function() {}); + this._registerConstructor('torchvision.models.squeezenet.Fire', function() {}); + this._registerConstructor('torchvision.models.squeezenet.SqueezeNet', function() {}); + this._registerConstructor('torchvision.models.resnet.ResNet', function() {}); + this._registerConstructor('torchvision.models.vgg.VGG', function() {}); + this._registerConstructor('torchvision.models.video.resnet.BasicBlock', 
function() {}); + this._registerConstructor('torchvision.models.video.resnet.BasicStem', function() {}); + this._registerConstructor('torchvision.models.video.resnet.Conv3DNoTemporal', function() {}); + this._registerConstructor('torchvision.models.video.resnet.Conv3DSimple', function() {}); + this._registerConstructor('torchvision.models.video.resnet.VideoResNet', function() {}); + this._registerConstructor('torchvision.models._utils.IntermediateLayerGetter', function() {}); + this._registerConstructor('torchvision.ops.feature_pyramid_network.FeaturePyramidNetwork', function() {}); + this._registerConstructor('torchvision.ops.feature_pyramid_network.LastLevelMaxPool', function() {}); + this._registerConstructor('torchvision.ops.misc.ConvTranspose2d', function() {}); + this._registerConstructor('torchvision.ops.misc.FrozenBatchNorm2d', function() {}); + this._registerConstructor('torchvision.ops.poolers.LevelMapper', function() {}); + this._registerConstructor('torchvision.ops.poolers.MultiScaleRoIAlign', function() {}); + this._registerConstructor('torchvision.transforms.transforms.Compose', function() {}); + this._registerConstructor('torchvision.transforms.transforms.Normalize', function() {}); + this._registerConstructor('torchvision.transforms.transforms.Resize', function() {}); + this._registerConstructor('torchvision.transforms.transforms.ToTensor', function() {}); + this._registerConstructor('torch.ByteStorage', function (size) { + this.size = size; this.dataTypeSize = 1; this.dataType = 'uint8'; + }); + this._registerConstructor('torch.CharStorage', function (size) { + this.size = size; this.dataTypeSize = 1; this.dataType = 'int8'; + }); + this._registerConstructor('torch.ShortStorage', function (size) { + this.size = size; this.dataTypeSize = 2; this.dataType = 'int16'; + }); + this._registerConstructor('torch.IntStorage', function (size) { + this.size = size; this.dataTypeSize = 4; this.dataType = 'int32'; + }); + 
this._registerConstructor('torch.LongStorage', function (size) { + this.size = size; this.dataTypeSize = 8; this.dataType = 'int64'; + }); + this._registerConstructor('torch.HalfStorage', function (size) { + this.size = size; this.dataTypeSize = 2; this.dataType = 'float16'; + }); + this._registerConstructor('torch.FloatStorage', function (size) { + this.size = size; this.dataTypeSize = 4; this.dataType = 'float32'; + }); + this._registerConstructor('torch.DoubleStorage', function (size) { + this.size = size; this.dataTypeSize = 8; this.dataType = 'float64'; + }); + this._registerConstructor('torch.QInt8Storage', function (size) { + this.size = size; this.dataTypeSize = 1; this.dataType = 'qint8'; + }); + this._registerConstructor('torch.FloatTensor', function () { + this.__setstate__ = function(state) { + this.storage = state[0]; + this.storage_offset = state[1]; + this.size = state[2]; + this.stride = state[3]; + }; + }); + this._registerConstructor('torch.DoubleTensor', function () { + this.__setstate__ = function(state) { + this.storage = state[0]; + this.storage_offset = state[1]; + this.size = state[2]; + this.stride = state[3]; + }; + }); + this._registerConstructor('torch.cuda.FloatTensor', function () { + this.__setstate__ = function(state) { + this.storage = state[0]; + this.storage_offset = state[1]; + this.size = state[2]; + this.stride = state[3]; + }; + }); + this._registerConstructor('torch.cuda.DoubleTensor', function () { + this.__setstate__ = function(state) { + this.storage = state[0]; + this.storage_offset = state[1]; + this.size = state[2]; + this.stride = state[3]; + }; + }); + this._registerConstructor('numpy.dtype', function(obj, align, copy) { + switch (obj) { + case 'i1': this.name = 'int8'; this.itemsize = 1; break; + case 'i2': this.name = 'int16'; this.itemsize = 2; break; + case 'i4': this.name = 'int32'; this.itemsize = 4; break; + case 'i8': this.name = 'int64'; this.itemsize = 8; break; + case 'b1': this.name = 'uint8'; 
this.itemsize = 1; break; + case 'u1': this.name = 'uint8'; this.itemsize = 1; break; + case 'u2': this.name = 'uint16'; this.itemsize = 2; break; + case 'u4': this.name = 'uint32'; this.itemsize = 4; break; + case 'u8': this.name = 'uint64'; this.itemsize = 8; break; + case 'f4': this.name = 'float32'; this.itemsize = 4; break; + case 'f8': this.name = 'float64'; this.itemsize = 8; break; + default: + if (obj.startsWith('V')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'void' + (this.itemsize * 8).toString(); + } + else if (obj.startsWith('O')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'object'; + } + else if (obj.startsWith('S')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'string'; + } + else if (obj.startsWith('U')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'string'; + } + else if (obj.startsWith('M')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'datetime'; + } + else { + throw new pytorch.Error("Unknown dtype '" + obj.toString() + "'."); + } + break; + } + this.align = align; + this.copy = copy; + this.__setstate__ = function(state) { + switch (state.length) { + case 8: + this.version = state[0]; + this.byteorder = state[1]; + this.subarray = state[2]; + this.names = state[3]; + this.fields = state[4]; + this.elsize = state[5]; + this.alignment = state[6]; + this.int_dtypeflags = state[7]; + break; + default: + throw new pytorch.Error("Unknown numpy.dtype setstate length '" + state.length.toString() + "'."); + } + }; + }); + this._registerConstructor('numpy.core.multiarray._reconstruct', function(subtype, shape, dtype) { + this.subtype = subtype; + this.shape = shape; + this.dtype = dtype; + this.__setstate__ = function(state) { + this.version = state[0]; + this.shape = state[1]; + this.typecode = state[2]; + this.is_f_order = state[3]; + this.rawdata = state[4]; + }; + this.__read__ = function(unpickler) { + const array = {}; + const subtype = this.subtype.split('.'); + 
array.__name__ = subtype.pop(); + array.__module__ = subtype.join('.'); + array.dtype = this.typecode; + array.shape = this.shape; + let size = array.dtype.itemsize; + for (let i = 0; i < array.shape.length; i++) { + size = size * array.shape[i]; + } + if (typeof this.rawdata == 'string') { + array.data = unpickler.unescape(this.rawdata, size); + if (array.data.length != size) { + throw new pytorch.Error('Invalid string array data size.'); + } + } + else { + array.data = this.rawdata; + if (array.data.length != size) { + // throw new pytorch.Error('Invalid array data size.'); + } + } + return array; + }; + }); + this._registerFunction('__builtin__.bytearray', function(source, encoding /*, errors */) { + if (encoding === 'latin-1') { + const array = new Uint8Array(source.length); + for (let i = 0; i < source.length; i++) { + array[i] = source.charCodeAt(i); + } + return array; + } + throw new pytorch.Error("Unsupported bytearray encoding '" + JSON.stringify(encoding) + "'."); + }); + this._registerFunction('__builtin__.getattr', function(obj, name, defaultValue) { + if (Object.prototype.hasOwnProperty.call(obj, name)) { + return obj[name]; + } + return defaultValue; + }); + this._registerFunction('__builtin__.set', function(iterable) { + return iterable ? 
iterable : []; + }); + this._registerFunction('__builtin__.slice', function(start, stop , step) { + return [ start, stop, step ]; + }); + this._registerFunction('collections.Counter', function(/* iterable */) { + return {}; + }); + this._registerFunction('collections.OrderedDict', function(args) { + const obj = new Map(); + obj.__setitem__ = function(key, value) { + obj.set(key, value); + }; + if (args) { + for (const arg of args) { + obj.__setitem__(arg[0], arg[1]); + } + } + return obj; + }); + this._registerFunction('numpy.core.multiarray.scalar', function(dtype, rawData) { + let data = rawData; + if (rawData.constructor !== Uint8Array) { + data = new Uint8Array(rawData.length); + for (let i = 0; i < rawData.length; i++) { + data[i] = rawData.charCodeAt(i); + } + } + const dataView = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (dtype.name) { + case 'float32': + return dataView.getFloat32(0, true); + case 'float64': + return dataView.getFloat64(0, true); + case 'uint8': + return dataView.getUint8(0, true); + case 'int8': + return dataView.getInt8(0, true); + case 'int16': + return dataView.getInt16(0, true); + case 'int32': + return dataView.getInt32(0, true); + case 'int64': + return new long.Long(dataView.getInt32(0, true), dataView.getInt32(4, true), false); + } + throw new pytorch.Error("Unknown scalar type '" + dtype.name + "'."); + }); + this._registerFunction('_codecs.encode', function(obj /*, econding */) { + return obj; + }); + this._registerFunction('collections.defaultdict', function(/* default_factory */) { + return {}; + }); + this._registerFunction('annotate', function(type, value) { + return value; + }); + this._registerFunction('int', function(/* tensor */) { + return NaN; // TODO + }); + this._registerFunction('float', function(/* tensor */) { + return NaN; // TODO + }); + this._registerFunction('getattr', function(obj, name, defaultValue) { + if (Object.prototype.hasOwnProperty.call(obj, name)) { + return obj[name]; + } 
+ return defaultValue; + }); + this._registerFunction('unchecked_cast', function(type, value) { + return value; + }); + this._registerFunction('ops.prim.data', function(tensor) { + return tensor; + }); + this._registerFunction('ops.prim.unchecked_unwrap_optional', function(value) { + return value; + }); + this._registerFunction('ops.prim.NumToTensor', function(value) { + return { __module__: 'torch', __name__: 'Tensor', value: value }; // TODO + }); + this._registerFunction('ops.prim.min', function(value) { + return Math.min.apply(null, value); + }); + this._registerFunction('ops.prim.shape', function(value) { + return value.size; + }); + this._registerFunction('ops.quantized.conv_prepack', function(/* weight, bias, stride, padding, dilation, groups */) { + return { __module__: 'torch', __name__: 'Tensor', __origin__: 'ops.quantized.conv_prepack' }; // TODO + }); + this._registerFunction('ops.quantized.conv2d_prepack', function(/* weight, bias, stride, padding, dilation, groups */) { + return { __module__: 'torch', __name__: 'Tensor', __origin__: 'ops.quantized.conv2d_prepack' }; // TODO + }); + this._registerFunction('ops.quantized.linear_prepack', function(/* weight, bias */) { + return { __module__: 'torch', __name__: 'Tensor', __origin__: 'ops.quantized.linear_prepack' }; // TODO + }); + + this._registerFunction('ops.prim.RaiseException', function(message) { + throw new pytorch.Error(message); + }); + this._registerFunction('range', function(start, stop, step) { + if (start !== undefined && Number.isInteger(start) && stop === undefined && step === undefined) { + return Array(start).keys(); + } + throw new pytorch.Error('Unsupported function range(' + JSON.stringify(start) + ', ' + JSON.stringify(stop) + ', ' + JSON.stringify(step) + ')'); + }); + this._registerFunction('torch._utils._rebuild_tensor', function (storage, storage_offset, size, stride) { + return { + __module__: storage.__module__, + __name__: storage.__name__.replace('Storage', 'Tensor'), + 
storage: storage, + storage_offset: storage_offset, + size: size, + stride: stride + }; + }); + this._registerFunction('torch._utils._rebuild_tensor_v2', function (storage, storage_offset, size, stride, requires_grad, backward_hooks) { + return { + __module__: storage.__module__, + __name__: storage.__name__.replace('Storage', 'Tensor'), + storage: storage, + storage_offset: storage_offset, + size: size, + stride: stride, + requires_grad: requires_grad, + backward_hooks: backward_hooks + }; + }); + this._registerFunction('torch._utils._rebuild_parameter', function(data, requires_grad, backward_hooks) { + const obj = self.invoke('torch.nn.parameter.Parameter', [ data, requires_grad ]); + obj.backward_hooks = backward_hooks; + return obj; + }); + this._registerFunction('torch._utils._rebuild_qtensor', function(storage, storage_offset, size, stride, quantizer_params, requires_grad, backward_hooks) { + return { + __module__: storage.__module__, + __name__: storage.__name__.replace('Storage', 'Tensor'), + storage: storage, + storage_offset: storage_offset, + size: size, + stride: stride, + quantizer_params: quantizer_params, + requires_grad:requires_grad, + backward_hooks: backward_hooks + }; + }); + this._registerFunction('torch._set_item', function(dict, key, value) { + dict[key] = value; + }); + this._registerFunction('torch.__contains__', function(dict, key) { + return dict[key] !== undefined; + }); + this._registerFunction('torch.__derive_index', function(index, start, step) { + return start + index * step; + }); + this._registerFunction('torch.__is__', function(left, right) { + if (left === null && right === null) { + return true; + } + if ((left !== null && right === null) || (left === null && right !== null)) { + return false; + } + throw new pytorch.Error("Unknown 'torch.__is__' expression type."); + }); + this._registerFunction('torch.__isnot__', function(left, right) { + if (left === null && right === null) { + return false; + } + if ((left !== null && right 
this._registerFunction('torch.add', function(left, right) {
    // Numeric addition for the script-interpreter shim.
    // Fix: the original returned `left * right` — an apparent copy-paste of
    // the torch.mul handler — which silently computed the wrong value for
    // every scalar addition evaluated during graph tracing.
    if (typeof left === 'number' && typeof right === 'number') {
        return left + right;
    }
    throw new pytorch.Error('Unknown torch.add expression type.');
});
pytorch.Error("Unknown 'torch.gt' expression type."); + }); + this._registerFunction('torch.jit._pickle.build_boollist', function(data) { + return data; + }); + this._registerFunction('torch.jit._pickle.build_doublelist', function(data) { + return data; + }); + this._registerFunction('torch.jit._pickle.build_intlist', function(data) { + return data; + }); + this._registerFunction('torch.jit._pickle.build_tensorlist', function(data) { + return data; + }); + this._registerFunction('torch.jit._pickle.build_tensor_from_id', function(data) { + return data; + }); + this._registerFunction('torch.jit._pickle.restore_type_tag', function(value /*, type_str */) { + return value; + }); + this._registerFunction('torch.keys', function(dict) { + return Object.keys(dict); + }); + this._registerFunction('torch.len', function(value) { + if (value) { + return value.length; + } + return NaN; + }); + this._registerFunction('torch.le', function(left, right) { + if (typeof left === 'number' && typeof right === 'number') { + if (isNaN(left) || isNaN(right)) { + return false; + } + return left <= right; + } + throw new pytorch.Error("Unknown 'torch.le' expression type."); + }); + this._registerFunction('torch.list', function(args) { + return args; + }); + this._registerFunction('torch.list_with_default', function(size /*, defaults */) { + return size; + }); + this._registerFunction('torch.lt', function(left, right) { + if (typeof left === 'number' && typeof right === 'number') { + return left < right; + } + throw new pytorch.Error("Unknown 'torch.lt' expression type."); + }); + this._registerFunction('torch.mul', function(left, right) { + if (typeof left === 'number' && typeof right === 'number') { + return left * right; + } + if (isNaN(left) || isNaN(right)) { + return NaN; + } + throw new pytorch.Error("Unknown 'torch.mul' expression type."); + }); + this._registerFunction('torch.ne', function(left, right) { + if (typeof left === 'number' && typeof right === 'number') { + if (isNaN(left) 
|| isNaN(right)) { + return false; + } + return left !== right; + } + if (Array.isArray(left) && Array.isArray(right) && left.length === right.length) { + return false; + } + throw new pytorch.Error("Unknown 'torch.ne' expression type."); + }); + this._registerFunction('torch.neg', function(value) { + if (typeof value === 'number') { + return -value; + } + throw new pytorch.Error("Unknown 'torch.neg' expression type."); + }); + this._registerFunction('torch.q_scale', function(/* tensor */) { + return -1; // TODO + }); + this._registerFunction('torch.t', function(tensor) { + return tensor; + }); + this._registerFunction('torch.size', function(tensor, dim) { + if (tensor && Array.isArray(tensor.size)) { + if (dim === undefined) { + return tensor.size; + } + if (Number.isInteger(dim)) { + if (dim >= 0 && dim < tensor.size.length) { + return tensor.size[dim]; + } + if (dim < 0 && -dim < tensor.size.length) { + return tensor.size[tensor.size.length + dim]; + } + } + throw new pytorch.Error('Dimension out of range (expected to be in range of ' + JSON.stringify(tensor.size) + ', but got ' + JSON.stringify(dim) + ').'); + } + return NaN; + }); + this._registerFunction('torch.slice', function(l, start, end, step) { + if (step !== 1) { + throw new pytorch.Error('Slicing only supports step=1'); + } + start = Math.max(0, start); + end = Math.min(l.length, end); + return l.slice(start, end); + }); + this._registerFunction('torch.sub', function(left, right) { + if (typeof left === 'number' && typeof right === 'number') { + return left * right; + } + throw new pytorch.Error("Unknown 'torch.sub' expression type."); + }); + this._registerFunction('torch.values', function(dict) { + return Object.keys(dict).map((key) => dict[key]); + }); + this._registerFunction('torch.warn', function() { + }); + this._registerFunction('uninitialized', function(type) { + if (type && type.__module__ === 'typing' && type.__name__ === 'Tuple') { + return []; + } + if (type && type.__module__ === 
'typing' && type.__name__ === 'List') { + return []; + } + if (type && type.__module__ === 'typing' && type.__name__ === 'Dict') { + return {}; + } + if (type && type.__module__ === 'torch' && type.__name__ === 'Tensor') { + return { __module__: type.__module__, __name__: type.__name__ }; + } + throw new pytorch.Error("Unsupported uninitialized argument '" + JSON.stringify(type) + "'."); + }); + } + + get context() { + return this._context; + } + + parse(file) { + const data = this._sources[file]; + if (data) { + const code = this._utf8Decoder.decode(data); + const reader = new this._python.Parser(code, file); + const program = reader.parse(); + if (!program) { + throw new pytorch.Error("Module '" + file + "' parse error."); + } + return program; + } + return null; + } + + package(name, file, raw) { + if (this._python && !this._packages.has(name)) { + file = file || 'code/' + name.split('.').join('/') + '.py'; + const program = this.parse(file); + if (program) { + let globals = this._context.getx(name); + if (globals === undefined) { + globals = {}; + this._context.setx(name, globals); + } + globals.__class__ = this._context.scope.builtins.module; + globals.__name__ = name; + globals.__file__ = file; + this._packages.set(name, globals); + const context = this._context.push(globals); + this._block(program.body, context); + if (raw) { + return program; + } + } + } + return this._packages.get(name); + } + + type(name) { + const type = this._context.getx(name); + if (type !== undefined) { + return type; + } + const parts = name.split('.'); + const className = parts.pop(); + const moduleName = parts.join('.'); + const module = this.package(moduleName); + if (module) { + return module[className]; + } + return null; + } + + invoke(name, args) { + const target = this.type(name); + if (target) { + if (target.__class__ === this._context.scope.builtins.type) { + const obj = {}; + obj.__proto__ = target; + if (obj.__init__ && typeof obj.__init__ === 'function') { + 
obj.__init__.apply(obj, args); + } + return obj; + } + else if (target.__class__ === this._context.scope.builtins.function) { + if (target.__call__) { + return target.__call__(args); + // throw new pytorch.Error('Unexpected function __call__.'); + } + else { + return target.apply(null, args); + } + } + } + this._raiseUnkownName(name); + const typeParts = name.split('.'); + const typeName = typeParts.pop(); + const typeModule = typeParts.join('.'); + return { + __module__: typeModule, + __name__: typeName + }; + } + + call(target, name, args, context) { + const callTarget = this._target(target, context); + const callArguments = args.map((argument) => this.expression(argument, context)); + if (!callTarget || (name !== null && !callTarget[name])) { + const targetName = pytorch.Utility.target(target) + '.' + name; + if (this.type(targetName)) { + return this.invoke(targetName, callArguments); + } + throw new pytorch.Error("Unsupported function '" + targetName + "'."); + } + const func = name ? 
callTarget[name] : callTarget; + if (func.__class__ === this._context.scope.builtins.type) { + const obj = {}; + obj.__proto__ = func; + if (obj.__init__ && typeof obj.__init__ === 'function') { + obj.__init__.apply(obj, args); + } + return obj; + } + if (func.__class__ === this._context.scope.builtins.function) { + if (func.__call__) { + return func.__call__(callArguments); + } + } + if (func.__class__ === this._context.scope.builtins.method) { + if (func.__call__) { + return func.__call__([ callTarget ].concat(callArguments)); + } + } + if (typeof func === 'function') { + return func.apply(callTarget, callArguments); + } + throw new pytorch.Error("Unsupported call expression."); + } + + apply(method, args, context) { + const locals = Array.prototype.slice.call(args); + context = context.push(); + for (const parameter of method.parameters) { + context.set(parameter.name, locals.shift()); + } + return this._block(method.body.statements, context); + } + + _block(statements, context) { + statements = Array.prototype.slice.call(statements); + while (statements.length > 0) { + const statement = statements.shift(); + switch (statement.type) { + case 'pass': { + break; + } + case 'return': { + return this.expression(statement.expression, context); + } + case 'def': { + const module = context.get('__name__'); + const self = this; + const parent = context.get('__class__'); + let type = null; + if (parent === this._context.scope.builtins.type) { + type = this._context.scope.builtins.method; + } + else if (parent === this._context.scope.builtins.module) { + type = this._context.scope.builtins.function; + } + else { + throw new pytorch.Error('Invalid function scope.'); + } + const func = { + __class__: type, + __globals__: context, + __module__: module, + __name__: statement.name, + __code__: statement, + __call__: function(args) { + return self.apply(this.__code__, args, this.__globals__); + } + }; + context.set(statement.name, func); + break; + } + case 'class': { + const 
scope = { + __class__:this._context.scope.builtins.type, + __module__: context.get('__name__'), + __name__: statement.name, + }; + context.set(statement.name, scope); + context = context.push(scope); + this._block(statement.body.statements, context); + context = context.pop(); + break; + } + case 'var': { + context.set(statement.name, undefined); + break; + } + case '=': { + this.expression(statement, context); + break; + } + case 'if': { + const condition = this.expression(statement.condition, context); + if (condition === true || condition) { + statements = statement.then.statements.concat(statements); + break; + } + else if (condition === false) { + statements = statement.else.statements.concat(statements); + break; + } + throw new pytorch.Error("Unknown condition."); + } + case 'for': { + if (statement.target.length == 1 && + statement.variable.length === 1 && statement.variable[0].type === 'id') { + const range = this.expression(statement.target[0], context); + const variable = statement.variable[0]; + let loop = []; + for (const value of range) { + loop.push({ type: '=', target: variable, expression: { type: 'number', value: value }}); + loop = loop.concat(statement.body.statements); + } + statements = loop.concat(statements); + break; + } + throw new pytorch.Error("Unsupported 'for' statement."); + } + case 'call': { + this.expression(statement, context); + break; + } + case 'import': { + for (const module of statement.modules) { + const moduleName = pytorch.Utility.target(module.name); + const globals = this.package(moduleName); + if (module.as) { + context.set(module.as, globals); + } + } + break; + } + default: { + throw new pytorch.Error("Unknown statement '" + statement.type + "'."); + } + } + } + } + + expression(expression, context) { + const self = context.getx('self'); + switch (expression.type) { + case '=': { + const target = expression.target; + if (target.type === 'id') { + context.set(target.value, this.expression(expression.expression, 
context)); + return; + } + else if (target.type === '[]') { + if (target.target.type === 'id' && + target.arguments.type === 'list' && + target.arguments.value.length === 1) { + const index = this.expression(target.arguments.value[0], context); + if (target.target.value === '__annotations__') { + context.set(target.target.value, context.get(target.target.value) || {}); + } + context.get(target.target.value)[index] = this.expression(expression.expression, context); + return; + } + } + else if (target.type === '.' && + target.member.type === 'id') { + this.expression(target.target, context)[target.member.value] = this.expression(expression.expression, context); + return; + } + else if (target.type === 'tuple') { + const value = this.expression(expression.expression, context); + if (target.value.length == value.length && target.value.every((item) => item.type === 'id')) { + for (let i = 0; i < value.length; i++) { + context.set(target.value[i].value, value[i]); + } + return; + } + } + break; + } + case 'list': { + return expression.value.map((item) => this.expression(item, context)); + } + case 'string': { + return expression.value.substring(1, expression.value.length - 1); + } + case 'number': { + return Number(expression.value); + } + case '[]': { + if (expression.target.type === 'id' && + expression.arguments.type === 'list' && + expression.arguments.value.length === 1) { + if (context.get(expression.target.value)) { + const index = this.expression(expression.arguments.value[0], context); + return context.get(expression.target.value)[index]; + } + } + const target = this.expression(expression.target, context); + if (target && expression.arguments.type === 'list' && + (target.__class__ === this.context.scope.typing._VariadicGenericAlias || + target.__class__ === this.context.scope.typing._GenericAlias || + target.__class__ === this.context.scope.typing._SpecialForm)) { + const type = Object.assign({}, target); + type.__args__ = expression.arguments.value.map((arg) 
=> this.expression(arg, context)); + return type; + } + if (expression.arguments.type === 'list' && expression.arguments.value.length === 1) { + const index = this.expression(expression.arguments.value[0], context); + return target[index]; + } + break; + } + case '.': { + if (expression.member.type == 'id') { + const target = this._target(expression.target, context); + return target[expression.member.value]; + } + throw new pytorch.Error("Unsupported field expression."); + } + case 'call': { + if (expression.target.type === 'id' && expression.target.value === 'annotate' && expression.arguments.length === 2) { + return this.expression(expression.arguments[1], context); + } + if (expression.target.type === 'id' && expression.target.value === 'unchecked_cast' && expression.arguments.length === 2) { + return this.expression(expression.arguments[1], context); + } + if (expression.target.type === '.') { + return this.call(expression.target.target, expression.target.member.value, expression.arguments, context); + } + return this.call(expression.target, null, expression.arguments, context); + } + case 'id': { + switch (expression.value) { + case 'self': return self; + case 'None': return null; + case 'True': return true; + case 'False': return false; + } + const type = + this._context.scope.builtins[expression.value] || + this._context.scope.typing[expression.value] || + this._context.scope.torch[expression.value]; + if (type && + (type.__class__ === this._context.scope.builtins.type || + type.__class__ === this._context.scope.typing._VariadicGenericAlias || + type.__class__ === this._context.scope.typing._GenericAlias || + type.__class__ === this._context.scope.typing._SpecialForm)) { + return type; + } + return context.get(expression.value); + } + case 'tuple': { + return expression.value.map((expression) => this.expression(expression, context)); + } + } + throw new pytorch.Error("Unknown expression '" + expression.type + "'."); + } + + _target(expression, context) { + 
let current = expression; + let packageName = ''; + for (;;) { + if (current.type === '.' && current.member && current.member.type === 'id') { + packageName = '.' + current.member.value + packageName; + current = current.target; + } + else if (current.type === 'id' && current.value !== 'self' && current.value !== 'CONSTANTS') { + packageName = current.value + packageName; + break; + } + else { + packageName = null; + break; + } + } + if (packageName) { + let target = context.getx(packageName); + if (!target) { + target = this.package(packageName); + if (!target) { + throw new pytorch.Error("Failed to resolve module '" + packageName + "'."); + } + } + return target; + } + return this.expression(expression, context); + } + + _registerFunction(name, callback) { + if (this._context.getx(name)) { + throw new pytorch.Error("Function '" + name + "' is already registered."); + } + const parts = name.split('.'); + callback.__class__ = this._context.scope.builtins.function; + callback.__name__ = parts.pop(); + callback.__module__ = parts.join('.'); + this._context.setx(name, callback); + } + + _registerConstructor(name, callback) { + if (this._context.getx(name)) { + throw new pytorch.Error("Constructor '" + name + "' is already registered."); + } + const parts = name.split('.'); + const typeName = parts.pop(); + const typeModule = parts.join('.'); + const type = { + __class__: this._context.scope.builtins.type, + __name__: typeName, + __module__: typeModule, + __init__: function() { + callback.apply(this, arguments); + } + }; + this._context.setx(name, type); + } + + _raiseUnkownName(name) { + if (name && !this._unknownNameMap.has(name)) { + this._unknownNameMap.add(name); + if (this._knownPackageMap.has(name.split('.').shift())) { + this._exceptionCallback(new pytorch.Error("Unknown function '" + name + "'."), false); + } + } + } +}; + +pytorch.Execution.Context = class { + + constructor(parent, scope) { + this._parent = parent || null; + this._scope = scope || {}; + } + + 
push(scope) { + return new pytorch.Execution.Context(this, scope); + } + + pop() { + return this._parent; + } + + get scope() { + return this._scope; + } + + set(name, value) { + this._scope[name] = value; + } + + get(name) { + if (name in this._scope) { + return this._scope[name]; + } + if (this._parent) { + return this._parent.get(name); + } + return undefined; + } + + setx(name, value) { + const parts = name.split('.'); + if (parts.length == 1) { + this.set(parts[0], value); + } + else { + let parent = this.get(parts[0]); + if (!parent) { + parent = {}; + this.set(parts[0], parent); + } + parts.shift(); + while (parts.length > 1) { + const part = parts.shift(); + parent[part] = parent[part] || {}; + parent = parent[part]; + } + parent[parts[0]] = value; + } + } + + getx(name) { + const parts = name.split('.'); + let value = this.get(parts[0]); + if (value) { + parts.shift(); + while (parts.length > 0 && value[parts[0]]) { + value = value[parts[0]]; + parts.shift(); + } + if (parts.length === 0) { + return value; + } + } + return undefined; + } +}; + +pytorch.Container = class { + + static open(context, metadata, pickle, python, exception) { + if (context.entries('zip').some((entry) => entry.name === 'model.json' || entry.name === 'data.pkl' || entry.name.endsWith('/model.json') || entry.name.endsWith('/data.pkl'))) { + return new pytorch.Container.Zip(context.entries('zip'), metadata, pickle, python, exception); + } + const buffer = context.buffer; + const signature = [ 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ]; + if (buffer && buffer.length > 14 && buffer[0] == 0x80 && buffer[1] < 0x05 && signature.every((v, i) => v == buffer[i + 2])) { + return new pytorch.Container.Pickle(buffer, pickle, exception); + } + if (context.entries('tar').some((entry) => entry.name == 'pickle')) { + return new pytorch.Container.Tar(context.entries('tar'), pickle, exception); + } + return null; + } +}; + +pytorch.Container.Tar = class { + + 
constructor(entries, pickle, exceptionCallback) { + this._entries = entries; + this._pickle = pickle; + this._exceptionCallack = exceptionCallback; + } + + get format() { + return 'PyTorch v0.1.1'; + } + + get data() { + this._unpickle(); + return this._data; + } + + get state() { + this._unpickle(); + return this._state; + } + + get littleEndian() { + this._unpickle(); + return this._littleEndian; + } + + _unpickle() { + if (!this._entries) { + return; + } + this._data = null; + this._state = null; + this._littleEndian = true; + + const execution = new pytorch.Execution(null, [], this._exceptionCallback); + + const entries = {}; + for (const entry of this._entries) { + switch (entry.name) { + case 'sys_info': entries.sys_info = entry.data; break; + case 'pickle': entries.pickle = entry.data; break; + case 'storages': entries.storages = entry.data; break; + case 'tensors': entries.tensors = entry.data; break; + } + } + + this._exceptionCallback = null; + this._entries = null; + + if (entries.sys_info) { + const unpickler = new this._pickle.Unpickler(entries.sys_info); + const sys_info = unpickler.load((name, args) => execution.invoke(name, args)); + if (sys_info.protocol_version != 1000) { + throw new pytorch.Error("Unsupported protocol version '" + sys_info.protocol_version + "'."); + } + if (sys_info.type_sizes && + ((sys_info.type_sizes.int && sys_info.type_sizes.int != 4) || + (sys_info.type_sizes.long && sys_info.type_sizes.long != 4) || + (sys_info.type_sizes.short && sys_info.type_sizes.short != 2))) { + throw new pytorch.Error('Unsupported type sizes.'); + } + this._littleEndian = sys_info.little_endian; + } + + const deserialized_objects = {}; + if (entries.storages) { + const unpickler = new this._pickle.Unpickler(entries.storages); + const num_storages = unpickler.load((name, args) => execution.invoke(name, args)); + for (let i = 0; i < num_storages; i++) { + const storage_args = unpickler.load(); + const storage_key = storage_args[0]; + const 
storage_type = storage_args[2]; + const size = long.Long.fromBytesLE(unpickler.read(8), false).toNumber(); + const storage = execution.invoke(storage_type, [ size ]); + storage.data = unpickler.read(storage.dataTypeSize * storage.size); + deserialized_objects[storage_key] = storage; + } + /* + let storage_views = unpickler.load(); + for target_cdata, root_cdata, offset, size in storage_views: + root = deserialized_objects[root_cdata] + deserialized_objects[target_cdata] = root[offset:offset + size] + */ + } + + if (entries.tensors) { + const unpickler = new this._pickle.Unpickler(entries.tensors); + const num_tensors = unpickler.load((name, args) => execution.invoke(name, args)); + for (let j = 0; j < num_tensors; j++) { + const tensor_args = unpickler.load(); + const tensor_key = tensor_args[0]; + const storage_id = tensor_args[1]; + const storage = deserialized_objects[storage_id]; + const ndim = long.Long.fromBytesLE(unpickler.read(4), false).toNumber(); + unpickler.read(4); + const shape = []; + for (let k = 0; k < ndim; k++) { + shape.push(long.Long.fromBytesLE(unpickler.read(8), false).toNumber()); + } + const stride = []; + for (let l = 0; l < ndim; l++) { + stride.push(long.Long.fromBytesLE(unpickler.read(8), false).toNumber()); + } + const storage_offset = long.Long.fromBytesLE(unpickler.read(8), false).toNumber(); + const tensor_type_name = storage.__name__.replace('Storage', 'Tensor'); + const tensor = execution.invoke(storage.__module__ + '.' 
+ tensor_type_name, []); + tensor.__setstate__([ storage, storage_offset, shape, stride ]); + deserialized_objects[tensor_key] = tensor; + } + } + + if (entries.pickle) { + const unpickler = new this._pickle.Unpickler(entries.pickle); + const persistent_load = (saved_id) => { + return deserialized_objects[saved_id]; + }; + let obj = unpickler.load((name, args) => execution.invoke(name, args), persistent_load); + if (obj) { + if (!(obj instanceof Map)) { + const map = new Map(); + for (const key of Object.keys(obj)) { + map.set(key, obj[key]); + } + obj = map; + } + this._state = []; + const state_map = {}; + if (obj instanceof Map) { + for (const item of obj) { + const key = item[0]; + const value = item[1]; + if (!key || !value) { + this._state = null; + break; + } + const state = {}; + state.id = key; + state.value = null; + if (value && value.__module__ === 'torch.nn.parameter' && value.__name__ === 'Parameter') { + state.value = value[0]; + } + else if (pytorch.Utility.isTensor(value)) { + state.value = value; + } + if (!state.value) { + this._state = null; + break; + } + const split = state.id.split('.'); + if (split.length < 2) { + this._state = null; + break; + } + state.name = split.pop(); + const state_group_name = split.join('.'); + let state_group = state_map[state_group_name]; + if (!state_group) { + state_group = {}; + state_group.name = state_group_name; + state_group.states = []; + state_map[state_group_name] = state_group; + this._state.push(state_group); + } + state_group.states.push(state); + } + } + } + } + } +}; + +pytorch.Container.Pickle = class { + + constructor(buffer, pickle, exception) { + this._buffer = buffer; + this._pickle = pickle; + this._exceptionCallback = exception; + } + + get format() { + return 'PyTorch v0.1.10'; + } + + get data() { + this._unpickle(); + return this._data; + } + + get state() { + this._unpickle(); + return this._state; + } + + get littleEndian() { + this._unpickle(); + return this._littleEndian; + } + + 
_unpickle() { + if (!this._buffer) { + return; + } + + const execution = new pytorch.Execution(null, [], this._exceptionCallback); + const unpickler = new this._pickle.Unpickler(this._buffer); + + this._buffer = null; + this._pickle = null; + this._exceptionCallback = null; + + unpickler.load(); // magic_number + const protocol_version = unpickler.load(); + if (protocol_version != 1001) { + throw new pytorch.Error("Unsupported protocol version '" + protocol_version + "'."); + } + const sys_info = unpickler.load(); + if (sys_info.protocol_version != 1001) { + throw new pytorch.Error("Unsupported protocol version '" + sys_info.protocol_version + "'."); + } + if (sys_info.type_sizes && + ((sys_info.type_sizes.int && sys_info.type_sizes.int != 4) || + (sys_info.type_sizes.long && sys_info.type_sizes.long != 4) || + (sys_info.type_sizes.short && sys_info.type_sizes.short != 2))) { + throw new pytorch.Error('Unsupported type sizes.'); + } + this._littleEndian = sys_info.little_endian; + + const module_source_map = new Map(); + const deserialized_objects = new Map(); + const persistent_load = (saved_id) => { + const typename = saved_id.shift(); + const data = saved_id; + switch (typename) { + case 'module': { + const module = data[0]; + const source = data[2]; + module_source_map.set(module, source); + return data[0]; + } + case 'storage': { + const data_type = data.shift(); + const root_key = data.shift(); + data.shift(); // location + const size = data.shift(); + const view_metadata = data.shift(); + if (!deserialized_objects.has(root_key)) { + const storage = execution.invoke(data_type, [ size ]); + deserialized_objects.set(root_key, storage); + } + if (view_metadata) { + const view_key = view_metadata.shift(); + view_metadata.shift(); // view_offset + view_metadata.shift(); // view_size + if (!deserialized_objects.has(view_key)) { + const view = null; // storage.slice(view_offset, view_offset + view_size); + deserialized_objects.set(view_key, view); + } + return 
deserialized_objects.get(view_key); + } + return deserialized_objects.get(root_key); + } + } + throw new pytorch.Error("Unknown persistent load type '" + typename + "'."); + }; + + const data = unpickler.load((name, args) => execution.invoke(name, args), persistent_load); + if (!data) { + throw new pytorch.Error('File format is not PyTorch.'); + } + + const deserialized_storage_keys = unpickler.load(); + for (const deserialized_storage_key of deserialized_storage_keys) { + const storage = deserialized_objects.get(deserialized_storage_key); + const size = long.Long.fromBytesLE(unpickler.read(8), false).toNumber(); + if (size != storage.size) { + throw new pytorch.Error('Storage size mismatch.'); + } + storage.data = unpickler.read(storage.dataTypeSize * storage.size); + } + this._data = this._findRootModule(data); + if (!this._data) { + this._state = this._findStateDict(data); + } + if (!this._data && !this._state) { + throw new pytorch.Error('File does not contain root module or state dictionary.'); + } + } + + _findRootModule(root) { + const candidates = [ root, root.model, root.net ]; + for (const obj of candidates) { + if (obj && obj._modules) { + return obj; + } + } + return null; + } + + _findStateDict(root) { + if (!root) { + return null; + } + if (root.encoder && Array.isArray(root.encoder) && + root.decoder && Array.isArray(root.decoder) && !root.state_dict) { + root = root.encoder.concat(root.decoder); + } + if (root instanceof Map) { + const obj = {}; + for (const pair of root) { + const key = pair[0]; + const value = pair[1]; + obj[key] = value; + } + root = obj; + } + const candidates = [ + root.state_dict, root.state, + root.model_state, root.model, root.model_state_dict, root.net_dict, + root.params, root.generator, root.discriminator, root.g_state, + root.network, root.net, root.netG, root.net_states, + root.state_dict_stylepredictor, root.state_dict_ghiasi, + root + ]; + for (const dict of candidates) { + let state_dict = null; + state_dict = 
state_dict || this._convertStateDictList(dict); + state_dict = state_dict || this._convertStateDictMap(dict); + state_dict = state_dict || this._convertStateDictGroupMap(dict); + if (state_dict) { + return state_dict; + } + } + return null; + } + + _convertStateDictList(list) { + if (list && list instanceof Map) { + for (const item of list) { + const key = item[0]; + const value = item[1]; + if (!key) { + return null; + } + if (value && !pytorch.Utility.isTensor(value)) { + return null; + } + } + const state_dict = []; + const state_map = {}; + for (const item of list) { + const key = item[0]; + const value = item[1]; + if (value !== null) { + const split = key.split('.'); + if (split.length < 2) { + return null; + } + const state = {}; + state.id = key; + state.name = split.pop(); + state.value = value; + const state_group_name = split.join('.'); + let state_group = state_map[state_group_name]; + if (!state_group) { + state_group = {}; + state_group.name = state_group_name; + state_group.states = []; + state_map[state_group_name] = state_group; + state_dict.push(state_group); + } + state_group.states.push(state); + } + } + return state_dict; + } + return null; + } + + _convertStateDictMap(obj) { + if (!obj || Array.isArray(obj)) { + return null; + } + const state_dict = []; + const state_map = {}; + for (const key in obj) { + const split = key.split('.'); + if (split.length < 1) { + return null; + } + const state = {}; + state.id = key; + state.name = split.pop(); + state.value = obj[key]; + if (state.value && state.value.__module__ === 'torch.nn.parameter' && state.value.__name__ === 'Parameter') { + if (pytorch.Utility.isTensor(state.value.data)) { + state.value = state.value.data; + } + } + if (!pytorch.Utility.isTensor(state.value)) { + return null; + } + const state_group_name = split.join('.'); + let state_group = state_map[state_group_name]; + if (!state_group) { + state_group = {}; + state_group.name = state_group_name; + state_group.states = []; + 
state_map[state_group_name] = state_group; + state_dict.push(state_group); + } + state_group.states.push(state); + } + return state_dict; + } + + _convertStateDictGroupMap(obj) { + if (!obj || Array.isArray(obj)) { + return null; + } + const state_dict = []; + const state_map = {}; + for (const state_group_name in obj) { + let state_group = state_map[state_group_name]; + if (!state_group) { + state_group = {}; + state_group.name = state_group_name; + state_group.states = []; + state_group.attributes = []; + state_map[state_group_name] = state_group; + state_dict.push(state_group); + } + const item = obj[state_group_name]; + if (!item) { + return null; + } + if (item instanceof Map) { + for (const pair of item) { + const key = pair[0]; + const value = pair[1]; + if (!key) { + return null; + } + if (value && !pytorch.Utility.isTensor(value)) { + return null; + } + state_group.states.push({ + id: state_group_name + '.' + key, + name: key, + value: value + }); + } + } + else if (item instanceof Uint8Array) { + return null; + } + else if (Object(item) === item) { + let hasTensors = false; + for (const key in item) { + const value = item[key]; + if (pytorch.Utility.isTensor(value)) { + state_group.states.push({ name: key, value: value, id: state_group_name + '.' + key }); + hasTensors = true; + } + else if (value !== Object(value)) { + state_group.attributes.push({ name: key, value: value }); + } + else if (value && value.data && value.__module__ === 'torch.nn.parameter' && value.__name__ === 'Parameter') { + state_group.states.push({ name: key, value: value.data, id: state_group_name + '.' 
+ key }); + hasTensors = true; + } + else { + return null; + } + } + if (!hasTensors) { + return null; + } + } + else { + return null; + } + } + return state_dict; + } +}; + +pytorch.Container.Zip = class { + + constructor(entries, metadata, pickle, python, exceptionCallback) { + this._entries = entries; + this._metadata = metadata; + this._pickle = pickle; + this._python = python; + this._exceptionCallback = exceptionCallback; + // https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/docs/serialization.md + const entry = this._entries.find((entry) => entry.name == 'model.json' || entry.name == 'data.pkl' || entry.name.endsWith('/model.json') || entry.name.endsWith('/data.pkl')); + if (!entry) { + throw new pytorch.Error("PyTorch Zip container does not contain 'data.pkl' or 'model.json'."); + } + const lastIndex = entry.name.lastIndexOf('/'); + this._prefix = lastIndex === -1 ? '' : entry.name.substring(0, lastIndex + 1); + this._utf8Decoder = new TextDecoder('utf-8'); + } + + get format() { + if (this._format === undefined) { + if (this._entry('model.json')) { + this._format = this._entry('attributes.pkl') ? 'TorchScript v1.1' : 'TorchScript v1.0'; + } + else if (this._entry('data.pkl')) { + // kProducedFileFormatVersion in ./third_party/src/pytorch/caffe2/serialize/inline_container.h + const versionEntry = this._entry('version'); + const versionNumber = versionEntry ? this._utf8Decoder.decode(versionEntry.data).split('\n').shift() : ''; + const versionTable = { '1': 'v1.3', '2': 'v1.4', '3': 'v1.6' }; + const version = versionTable[versionNumber]; + if (!version) { + this._exceptionCallback(new pytorch.Error("Unsupported PyTorch ZIP version '" + versionNumber + "'.")); + } + this._format = (this._entry('constants.pkl') ? 'TorchScript' : 'PyTorch') + ' ' + (version || 'v#' + versionNumber.toString() ); + } + } + return this._format; + } + + get producer() { + return this.data ? 
this._producer : ''; + } + + get name() { + return this._name; + } + + get data() { + if (this._data === undefined) { + this._data = null; + const dataEntry = this._entry('data.pkl'); + if (dataEntry && dataEntry.data) { + this._data = this._unpickle(dataEntry.data, this._storage('data')); + } + else { + const modelEntry = this._entry('model.json'); + if (modelEntry) { + const model = JSON.parse(this._utf8Decoder.decode(modelEntry.data)); + this._producer = model.producerName + (model.producerVersion ? ' v' + model.producerVersion : ''); + this._data = model.mainModule || {}; + this._name = this._data.name || ''; + if (this._data.torchscriptArena) { + this._torchscriptArena = this._data.torchscriptArena.key; + } + const queue = [ this._data ]; + const entries = new Map(); + for (const entry of this._entries) { + entries.set(entry.name, entry.data); + } + const tensorTypeMap = new Map([ + [ 'FLOAT', 'Float' ], + [ 'FLOAT16', 'Half' ], + [ 'DOUBLE', 'Double' ], + [ 'INT8', 'Char' ], + [ 'INT32', 'Int' ], + [ 'INT64', 'Long' ] + ]); + this._constants = model.tensors || []; + for (const tensor of this._constants) { + const key = this._prefix + tensor.data.key; + if (!tensorTypeMap.has(tensor.dataType)) { + throw new pytorch.Error("Unknown tensor data type '" + tensor.dataType + "'."); + } + const type = tensorTypeMap.get(tensor.dataType); + tensor.__module__ = 'torch'; + tensor.__name__ = 'Tensor'; + tensor.name = tensor.data.key; + tensor.size = tensor.dims ? tensor.dims.map((dim) => parseInt(dim, 10)) : null; + tensor.storage = this.execution.invoke('torch.' 
+ type + 'Storage', [ tensor.size ]); + tensor.storage.data = entries.get(key); + } + while (queue.length > 0) { + const module = queue.shift(); + if (!module.__module__ && !module.__name__) { + module.__module__ = 'torch.nn.modules.module'; + module.__name__ = 'Module'; + } + if (module.name) { + module.__id__ = module.name; + } + if (module.submodules) { + for (const submodule of module.submodules) { + module[submodule.name] = submodule; + submodule.__parent__ = module; + queue.push(submodule); + } + delete module.submodules; + } + let parameters = []; + if (module.parameters) { + parameters = parameters.concat(module.parameters); + delete module.parameters; + } + if (module.arguments) { + parameters = parameters.concat(module.arguments); + delete module.arguments; + } + for (const parameter of parameters) { + const tensor = this._constants[parameter.tensorId]; + module[parameter.name] = tensor; + if (!parameter.__module__ || !parameter.__name__) { + parameter.__module__ = 'torch'; + parameter.__name__ = 'Tensor'; + } + } + } + } + } + } + return this._data; + } + + get constants() { + if (this._constants === undefined) { + this._constants = []; + const entry = this._entry('constants.pkl'); + if (entry && entry.data) { + this._constants = this._unpickle(entry.data, this._storage('constants')); + } + } + return this._constants; + } + + get execution() { + if (this._execution === undefined) { + this._types = new Map(); // TODO + const sources = {}; + for (const entry of this._entries) { + if (entry.name.startsWith(this._prefix + 'code')) { + const file = entry.name.substring(this._prefix.length); + if (sources[file]) { + throw new pytorch.Error("Duplicate source file '" + file + "'."); + } + sources[file] = entry.data; + } + } + this._execution = new pytorch.Container.Zip.Execution(this._python, sources, this._exceptionCallback, this._metadata); + const constants = {}; + for (let i = 0; i < this.constants.length; i++) { + constants['c' + i.toString()] = 
this.constants[i]; + } + this._execution.context.set('CONSTANTS', constants); + } + return this._execution; + } + + _entry(name) { + return this._entries.find((entry) => entry.name == this._prefix + name); + } + + _unpickle(data, storage_map) { + const deserialized_objects = new Map(); + const persistent_load = (saved_id) => { + const typename = saved_id.shift(); + if (typename !== 'storage') { + throw new pytorch.Error("Unknown persistent load type '" + typename + "'."); + } + const data_type = saved_id.shift(); + const root_key = saved_id.shift(); + saved_id.shift(); // location + const size = saved_id.shift(); + let storage = null; + if (deserialized_objects.has(root_key)) { + storage = deserialized_objects.get(root_key); + } + else { + storage = this.execution.invoke(data_type, [ size ]); + storage.data = storage_map.get(root_key); + deserialized_objects.set(root_key, storage); + } + const view_metadata = saved_id.shift(); + if (view_metadata) { + const view_key = view_metadata.shift(); + view_metadata.shift(); // view_offset + view_metadata.shift(); // view_size + let view = null; + if (deserialized_objects.has(view_key)) { + view = deserialized_objects.get(view_key); // fix: cache hit must use the view's own key, not root_key + } + else { + view = null; // storage.slice(view_offset, view_offset + view_size); + deserialized_objects.set(view_key, view); + } + return view; + } + return storage; + }; + return new this._pickle.Unpickler(data).load((name, args) => this.execution.invoke(name, args), persistent_load); + } + + _storage(dirname) { + const map = new Map(); + const prefix = this._prefix + dirname + '/'; + for (const entry of this._entries) { + if (entry.name.startsWith(prefix)) { + const key = entry.name.substring(prefix.length); + map.set(key, entry.data); + } + } + return map; + } + + _type(name) { + if (!this._types.has(name)) { + const parts = name.split('.'); + const className = parts.pop(); + const file = 'code/' + parts.join('/') + '.py'; + const program = this.execution.parse(file); + if (program) { + for 
(const statement of program.body) { + if (statement.type === 'class' && statement.name == className) { + this._types.set(name, statement); + break; + } + } + } + } + return this._types.get(name); + } + + trace() { + this._inputs = []; + this._outputs = []; + this.execution.reset(); + if (this._torchscriptArena) { + const program = this.execution.parse(this._torchscriptArena); + for (const statement of program.body) { + if (statement.type == 'def') { + const self = this; + const globals = this.execution.context; + const func = { + __class__: this.execution.context.scope.builtins.function, + __name__: statement.name, + __code__: statement, + __call__: function(args) { + return self.execution.apply(this.__code__, args, globals); + } + }; + this.data[statement.name] = func; + } + } + } + if (this.data.forward) { + const args = [ this.data ]; // self + if (this.data.forward.__code__ && this.data.forward.__code__.parameters) { + for (const parameter of this.data.forward.__code__.parameters) { + if (parameter.name !== 'self') { + const type = parameter.parameterType; + if (type.type === 'type' && type.name.type) { + if (type.name.value === 'Tensor') { + this._inputs.push(parameter.name); + args.push({ __module__: 'torch', __name__: 'Tensor', __variable__: parameter.name, __origin__: 'trace-input-tensor' }); + } + if (type.name.value === 'Tuple' && type.arguments.every((item) => item.type === 'type' && item.name.type === 'id' && item.name.value === 'Tensor')) { + this._inputs.push(parameter.name); + args.push(type.arguments.map(() => { return { __module__: 'torch', __name__: 'Tensor', __variable__: parameter.name, __origin__: 'trace-input-tuple' }; })); + } + if (type.name.value === 'List' && type.arguments.every((item) => item.type === 'type' && item.name.type === 'id' && item.name.value === 'Tensor')) { + this._inputs.push(parameter.name); + args.push([ { __module__: 'torch', __name__: 'Tensor', __variable__: parameter.name, size: [ NaN, NaN ], __origin__: 
'trace-input-list' } ]); + } + } + } + } + } + const result = this.data.forward.__call__(args); + const outputs = !Array.isArray(result) ? [ result ] : result; + for (const output of outputs) { + if (pytorch.Utility.isTensor(output)) { + this._outputs.push(output.__variable__); + } + } + this._nodes = this.execution.nodes; + return true; + } + throw new pytorch.Error("Module 'forward' not implemented."); + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +pytorch.Container.Zip.Execution = class extends pytorch.Execution { + + constructor(python, sources, exceptionCallback, metadata) { + super(python, sources, exceptionCallback); + this._metadata = metadata; + this.reset(); + } + + reset() { + this._nodes = []; + this._variableIndex = 0; + } + + get nodes() { + return this._nodes; + } + + call(target, name, args, context) { + let callTarget = pytorch.Utility.target(target); + let outputTypes = null; + if (callTarget && callTarget + '.' + name === 'ops.prim.NumToTensor' && + args.length === 1 && args[0].type === 'call' && args[0].target.member.type == 'id') { + const innerCall = args[0]; + callTarget = pytorch.Utility.target(innerCall.target.target); + args = innerCall.arguments; + name = innerCall.target.member.value; + outputTypes = [ 'int64' ]; + } + if (callTarget) { + const type = callTarget + '.' 
+ name; + // ./third_party/src/pytorch/aten/src/ATen/native/native_functions.yaml + let schemas = this._metadata.type(type); + if (schemas) { + if (!Array.isArray(schemas)) { + schemas = [ schemas ]; + } + for (const schema of schemas) { + const callArgs = Array.prototype.slice.call(args); + const node = { + type: schema.name, + inputs: [], + attributes: [], + outputs: [] + }; + const referencedParameters = []; + let next = false; + const inputSchemas = Array.prototype.slice.call(schema.inputs || []); + while (inputSchemas.length > 0) { + const inputSchema = inputSchemas.shift(); + const argument = this.expression(callArgs.shift(), context); + if ((Array.isArray(argument) && inputSchema.type !== 'T[]') || + (!Array.isArray(argument) && inputSchema.type === 'T[]')) { + next = true; + break; + } + const parameters = Array.isArray(argument) ? argument : [ argument ]; + const inputs = []; + for (let parameter of parameters) { + if (parameter !== undefined) { + if (!pytorch.Utility.isTensor(parameter) && parameter !== null) { + next = true; + break; + } + if (parameter === null) { + parameter = {}; + } + if (!parameter.__variable__) { + parameter.__variable__ = this._variable(); + } + inputs.push({ id: parameter.__variable__ }); + referencedParameters.push(parameter); + } + } + if (next) { + break; + } + node.inputs.push(inputs); + } + if (next) { + continue; + } + while (callArgs.length > 0 && callArgs[0].type !== '=') { + const value = this.expression(callArgs.shift(), context); + node.attributes.push(value); + } + while (callArgs.length > 0) { + const arg = callArgs.shift(); + if (arg.type === '=' && arg.target && arg.target.type === 'id') { + const value = this.expression(arg.expression, context); + node.attributes.push({ type: '=', target: arg.target, expression: value }); + } + else { + throw new pytorch.Error('Expected named argument.'); // fix: was pytorch.Attribute, which is not defined in this file + } + } + const outputs = []; + for (let i = 0; i < schema.outputs.length; i++) { + if (schema.outputs[i].type && 
schema.outputs[i].type !== 'T') { + if (!outputTypes || outputTypes.length !== schema.outputs.length || schema.outputs[i].type !== outputTypes[i]) { + next = true; + break; + } + } + const parameter = { __module__: 'torch', __name__: 'Tensor', __origin__: 'invoke-output-' + type }; + switch (type) { + case 'torch.cat': + case 'torch.conv2d': + case 'torch.dropout': + case 'torch.flatten': + case 'torch.max_pool2d': + case 'torch.quantize_per_tensor': + case 'torch.relu_': + case 'torch.hardtanh_': + case 'torch.slice': { + parameter.size = [ NaN, NaN, NaN, NaN ]; + break; + } + case 'torch.conv3d': { + parameter.size = [ NaN, NaN, NaN, NaN, NaN ]; + break; + } + case 'torch.embedding': { + parameter.size = [ NaN, NaN, NaN ]; + break; + } + case 'torch.ones': + case 'torch.zeros': + case 'torch.zeros_like': { + parameter.size = this.expression(args[0], context); + break; + } + } + parameter.__variable__ = this._variable(); + outputs.push(parameter); + node.outputs.push(parameter.__variable__); + } + if (next) { + continue; + } + for (const parameter of referencedParameters) { + parameter.__count__ = (parameter.__count__ || 0) + 1; + } + this._nodes.push(node); + if (outputs.length > 1) { + return outputs; + } + return outputs[0]; + } + } + } + return super.call(target, name, args, context); + } + + _variable() { + this._variableIndex++; + return this._variableIndex.toString(); + } +}; + +pytorch.Utility = class { + + static target(expression) { + if (expression.type == 'id') { + return expression.value; + } + if (expression.type == '.') { + return pytorch.Utility.target(expression.target) + '.' 
+ pytorch.Utility.target(expression.member); + } + return null; + } + + static isTensor(obj) { + return obj && (obj.__module__ === 'torch' || obj.__module__ === 'torch.cuda') && obj.__name__ && obj.__name__.endsWith('Tensor'); + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = pytorch.ModelFactory; +} diff --git a/frontend/packages/core/public/netron/sklearn-metadata.json b/frontend/packages/core/public/netron/sklearn-metadata.json new file mode 100644 index 00000000..272ef496 --- /dev/null +++ b/frontend/packages/core/public/netron/sklearn-metadata.json @@ -0,0 +1,2244 @@ +[ + { + "name": "sklearn.preprocessing.Binarizer", + "schema": { + "attributes": [ + { + "default": true, + "description": "set to False to perform inplace binarization and avoid a copy (if\nthe input is already a numpy array or a scipy.sparse CSR matrix).\n", + "name": "copy", + "option": "optional", + "type": "boolean" + }, + { + "default": 0.0, + "description": "Feature values below or equal to this are replaced by 0, above it by 1.\nThreshold may not be less than 0 for operations on sparse matrices.\n", + "name": "threshold", + "option": "optional", + "type": "float32" + } + ], + "description": "Binarize data (set feature values to 0 or 1) according to a threshold\n\nValues greater than the threshold map to 1, while values less than\nor equal to the threshold map to 0. With the default threshold of 0,\nonly positive values map to 1.\n\nBinarization is a common operation on text count data where the\nanalyst can decide to only consider the presence or absence of a\nfeature rather than a quantified number of occurrences for instance.\n\nIt can also be used as a pre-processing step for estimators that\nconsider boolean random variables (e.g. 
modelled using the Bernoulli\ndistribution in a Bayesian setting).\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.preprocessing.MultiLabelBinarizer", + "schema": { + "attributes": [ + { + "description": "Indicates an ordering for the class labels.\nAll entries should be unique (cannot contain duplicate classes).\n", + "name": "classes", + "option": "optional" + }, + { + "description": "Set to true if output binary array is desired in CSR sparse format\n", + "name": "sparse_output" + } + ], + "description": "Transform between iterable of iterables and a multilabel format\n\nAlthough a list of sets or tuples is a very intuitive format for multilabel\ndata, it is unwieldy to process. This transformer converts between this\nintuitive format and the supported multilabel format: a (samples x classes)\nbinary matrix indicating the presence of a class label.\n" + } + }, + { + "name": "sklearn.preprocessing.LabelEncoder", + "schema": { + "description": "Encode target labels with value between 0 and n_classes-1.\n\nThis transformer should be used to encode target values, *i.e.* `y`, and\nnot the input `X`.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.12\n" + } + }, + { + "name": "sklearn.svm.classes.SVC", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty.\n", + "name": "C", + "type": "float32" + }, + { + "default": "rbf", + "description": "Specifies the kernel type to be used in the algorithm.\nIt must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or\na callable.\nIf none is given, 'rbf' will be used. 
If a callable is given it is\nused to pre-compute the kernel matrix from data matrices; that matrix\nshould be an array of shape ``(n_samples, n_samples)``.\n", + "name": "kernel" + }, + { + "default": 3, + "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels.\n", + "name": "degree", + "type": "int32" + }, + { + "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\nThe default value of ``gamma`` changed from 'auto' to 'scale'.\n", + "name": "gamma" + }, + { + "default": 0.0, + "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'.\n", + "name": "coef0", + "type": "float32" + }, + { + "default": true, + "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `.\n", + "name": "shrinking", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide `.\n", + "name": "probability", + "type": "boolean" + }, + { + "default": 0.001, + "description": "Tolerance for stopping criterion.\n", + "name": "tol", + "type": "float32" + }, + { + "default": 200.0, + "description": "Specify the size of the kernel cache (in MB).\n", + "name": "cache_size", + "type": "float32" + }, + { + "default": null, + "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. 
If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n", + "name": "class_weight" + }, + { + "default": false, + "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": -1, + "description": "Hard limit on iterations within solver, or -1 for no limit.\n", + "name": "max_iter", + "type": "int32" + }, + { + "default": "ovr", + "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one\n('ovo') is always used as multi-class strategy. The parameter is\nignored for binary classification.\n\n.. versionchanged:: 0.19\ndecision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n*decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\nDeprecated *decision_function_shape='ovo' and None*.\n", + "name": "decision_function_shape" + }, + { + "default": false, + "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22\n", + "name": "break_ties", + "type": "boolean" + }, + { + "default": null, + "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. 
Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state" + } + ], + "description": "C-Support Vector Classification.\n\nThe implementation is based on libsvm. The fit time scales at least\nquadratically with the number of samples and may be impractical\nbeyond tens of thousands of samples. For large datasets\nconsider using :class:`sklearn.svm.LinearSVC` or\n:class:`sklearn.linear_model.SGDClassifier` instead, possibly after a\n:class:`sklearn.kernel_approximation.Nystroem` transformer.\n\nThe multiclass support is handled according to a one-vs-one scheme.\n\nFor details on the precise mathematical formulation of the provided\nkernel functions and how `gamma`, `coef0` and `degree` affect each\nother, see the corresponding section in the narrative documentation:\n:ref:`svm_kernels`.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.svm.SVC", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. The penalty\nis a squared l2 penalty.\n", + "name": "C", + "option": "optional", + "type": "float32" + }, + { + "default": "rbf", + "description": "Specifies the kernel type to be used in the algorithm.\nIt must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or\na callable.\nIf none is given, 'rbf' will be used. 
If a callable is given it is\nused to pre-compute the kernel matrix from data matrices; that matrix\nshould be an array of shape ``(n_samples, n_samples)``.\n", + "name": "kernel", + "option": "optional", + "type": "string" + }, + { + "default": 3, + "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels.\n", + "name": "degree", + "option": "optional", + "type": "int32" + }, + { + "default": "auto", + "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\nThe default value of ``gamma`` changed from 'auto' to 'scale'.\n", + "name": "gamma", + "option": "optional", + "type": "float32" + }, + { + "default": 0.0, + "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'.\n", + "name": "coef0", + "option": "optional", + "type": "float32" + }, + { + "default": false, + "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. Read more in the :ref:`User Guide `.\n", + "name": "probability", + "option": "optional", + "type": "boolean" + }, + { + "default": true, + "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `.\n", + "name": "shrinking", + "option": "optional", + "type": "boolean" + }, + { + "default": 0.001, + "description": "Tolerance for stopping criterion.\n", + "name": "tol", + "option": "optional", + "type": "float32" + }, + { + "default": 200.0, + "description": "Specify the size of the kernel cache (in MB).\n", + "name": "cache_size", + "option": "optional", + "type": "float32" + }, + { + "default": null, + "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. 
If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n", + "name": "class_weight", + "option": "optional" + }, + { + "default": false, + "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": -1, + "description": "Hard limit on iterations within solver, or -1 for no limit.\n", + "name": "max_iter", + "option": "optional", + "type": "int32" + }, + { + "default": "ovr", + "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one\n('ovo') is always used as multi-class strategy. The parameter is\nignored for binary classification.\n\n.. versionchanged:: 0.19\ndecision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n*decision_function_shape='ovr'* is recommended.\n\n.. versionchanged:: 0.17\nDeprecated *decision_function_shape='ovo' and None*.\n", + "name": "decision_function_shape" + }, + { + "default": null, + "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. 
Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "option": "optional", + "type": "int32" + }, + { + "default": false, + "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22\n", + "name": "break_ties", + "option": "optional", + "type": "boolean" + } + ], + "description": "C-Support Vector Classification.\n\nThe implementation is based on libsvm. The fit time scales at least\nquadratically with the number of samples and may be impractical\nbeyond tens of thousands of samples. For large datasets\nconsider using :class:`sklearn.svm.LinearSVC` or\n:class:`sklearn.linear_model.SGDClassifier` instead, possibly after a\n:class:`sklearn.kernel_approximation.Nystroem` transformer.\n\nThe multiclass support is handled according to a one-vs-one scheme.\n\nFor details on the precise mathematical formulation of the provided\nkernel functions and how `gamma`, `coef0` and `degree` affect each\nother, see the corresponding section in the narrative documentation:\n:ref:`svm_kernels`.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.svm.SVC", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Regularization parameter. The strength of the regularization is\ninversely proportional to C. Must be strictly positive. 
The penalty\nis a squared l2 penalty.\n", + "name": "C", + "option": "optional", + "type": "float32" + }, + { + "default": "rbf", + "description": "Specifies the kernel type to be used in the algorithm.\nIt must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or\na callable.\nIf none is given, 'rbf' will be used. If a callable is given it is\nused to pre-compute the kernel matrix from data matrices; that matrix\nshould be an array of shape ``(n_samples, n_samples)``.\n", + "name": "kernel", + "option": "optional", + "type": "string" + }, + { + "default": 3, + "description": "Degree of the polynomial kernel function ('poly').\nIgnored by all other kernels.\n", + "name": "degree", + "option": "optional", + "type": "int32" + }, + { + "default": "auto", + "description": "Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.\n\n- if ``gamma='scale'`` (default) is passed then it uses\n1 / (n_features * X.var()) as value of gamma,\n- if 'auto', uses 1 / n_features.\n\n.. versionchanged:: 0.22\nThe default value of ``gamma`` changed from 'auto' to 'scale'.\n", + "name": "gamma", + "option": "optional", + "type": "float32" + }, + { + "default": 0.0, + "description": "Independent term in kernel function.\nIt is only significant in 'poly' and 'sigmoid'.\n", + "name": "coef0", + "option": "optional", + "type": "float32" + }, + { + "default": true, + "description": "Whether to use the shrinking heuristic.\nSee the :ref:`User Guide `.\n", + "name": "shrinking", + "option": "optional", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to enable probability estimates. This must be enabled prior\nto calling `fit`, will slow down that method as it internally uses\n5-fold cross-validation, and `predict_proba` may be inconsistent with\n`predict`. 
Read more in the :ref:`User Guide `.\n", + "name": "probability", + "option": "optional", + "type": "boolean" + }, + { + "default": 0.001, + "description": "Tolerance for stopping criterion.\n", + "name": "tol", + "option": "optional", + "type": "float32" + }, + { + "default": 200.0, + "description": "Specify the size of the kernel cache (in MB).\n", + "name": "cache_size", + "option": "optional", + "type": "float32" + }, + { + "default": null, + "description": "Set the parameter C of class i to class_weight[i]*C for\nSVC. If not given, all classes are supposed to have\nweight one.\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n", + "name": "class_weight", + "option": "optional" + }, + { + "default": false, + "description": "Enable verbose output. Note that this setting takes advantage of a\nper-process runtime setting in libsvm that, if enabled, may not work\nproperly in a multithreaded context.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": -1, + "description": "Hard limit on iterations within solver, or -1 for no limit.\n", + "name": "max_iter", + "option": "optional", + "type": "int32" + }, + { + "default": "ovr", + "description": "Whether to return a one-vs-rest ('ovr') decision function of shape\n(n_samples, n_classes) as all other classifiers, or the original\none-vs-one ('ovo') decision function of libsvm which has shape\n(n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one\n('ovo') is always used as multi-class strategy. The parameter is\nignored for binary classification.\n\n.. versionchanged:: 0.19\ndecision_function_shape is 'ovr' by default.\n\n.. versionadded:: 0.17\n*decision_function_shape='ovr'* is recommended.\n\n.. 
versionchanged:: 0.17\nDeprecated *decision_function_shape='ovo' and None*.\n", + "name": "decision_function_shape" + }, + { + "default": null, + "description": "Controls the pseudo random number generation for shuffling the data for\nprobability estimates. Ignored when `probability` is False.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "option": "optional" + }, + { + "default": false, + "description": "If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n:term:`predict` will break ties according to the confidence values of\n:term:`decision_function`; otherwise the first class among the tied\nclasses is returned. Please note that breaking ties comes at a\nrelatively high computational cost compared to a simple predict.\n\n.. versionadded:: 0.22\n", + "name": "break_ties", + "option": "optional", + "type": "boolean" + } + ], + "description": "C-Support Vector Classification.\n\nThe implementation is based on libsvm. The fit time scales at least\nquadratically with the number of samples and may be impractical\nbeyond tens of thousands of samples. For large datasets\nconsider using :class:`sklearn.svm.LinearSVC` or\n:class:`sklearn.linear_model.SGDClassifier` instead, possibly after a\n:class:`sklearn.kernel_approximation.Nystroem` transformer.\n\nThe multiclass support is handled according to a one-vs-one scheme.\n\nFor details on the precise mathematical formulation of the provided\nkernel functions and how `gamma`, `coef0` and `degree` affect each\nother, see the corresponding section in the narrative documentation:\n:ref:`svm_kernels`.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.linear_model._logistic.LogisticRegression", + "schema": { + "attributes": [ + { + "default": "l2", + "description": "Used to specify the norm used in the penalization. The 'newton-cg',\n'sag' and 'lbfgs' solvers support only l2 penalties. 
'elasticnet' is\nonly supported by the 'saga' solver. If 'none' (not supported by the\nliblinear solver), no regularization is applied.\n\n.. versionadded:: 0.19\nl1 penalty with SAGA solver (allowing 'multinomial' + L1)\n", + "name": "penalty" + }, + { + "default": false, + "description": "Dual or primal formulation. Dual formulation is only implemented for\nl2 penalty with liblinear solver. Prefer dual=False when\nn_samples > n_features.\n", + "name": "dual", + "type": "boolean" + }, + { + "default": 0.0001, + "description": "Tolerance for stopping criteria.\n", + "name": "tol", + "type": "float32" + }, + { + "default": 1.0, + "description": "Inverse of regularization strength; must be a positive float.\nLike in support vector machines, smaller values specify stronger\nregularization.\n", + "name": "C", + "type": "float32" + }, + { + "default": true, + "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the decision function.\n", + "name": "fit_intercept", + "type": "boolean" + }, + { + "default": 1.0, + "description": "Useful only when the solver 'liblinear' is used\nand self.fit_intercept is set to True. In this case, x becomes\n[x, self.intercept_scaling],\ni.e. a \"synthetic\" feature with constant value equal to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes ``intercept_scaling * synthetic_feature_weight``.\n\nNote! 
the synthetic feature weight is subject to l1/l2 regularization\nas all other features.\nTo lessen the effect of regularization on synthetic feature weight\n(and therefore on the intercept) intercept_scaling has to be increased.\n", + "name": "intercept_scaling", + "type": "float32" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n\n.. versionadded:: 0.17\n*class_weight='balanced'*\n", + "name": "class_weight" + }, + { + "default": null, + "description": "Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the\ndata. See :term:`Glossary ` for details.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": "lbfgs", + "description": "\nAlgorithm to use in the optimization problem.\n\n- For small datasets, 'liblinear' is a good choice, whereas 'sag' and\n'saga' are faster for large ones.\n- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'\nhandle multinomial loss; 'liblinear' is limited to one-versus-rest\nschemes.\n- 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty\n- 'liblinear' and 'saga' also handle L1 penalty\n- 'saga' also supports 'elasticnet' penalty\n- 'liblinear' does not support setting ``penalty='none'``\n\nNote that 'sag' and 'saga' fast convergence is only guaranteed on\nfeatures with approximately the same scale. You can\npreprocess the data with a scaler from sklearn.preprocessing.\n\n.. versionadded:: 0.17\nStochastic Average Gradient descent solver.\n.. versionadded:: 0.19\nSAGA solver.\n.. 
versionchanged:: 0.22\nThe default solver changed from 'liblinear' to 'lbfgs' in 0.22.\n", + "name": "solver" + }, + { + "default": 100, + "description": "Maximum number of iterations taken for the solvers to converge.\n", + "name": "max_iter", + "type": "int32" + }, + { + "default": "auto", + "description": "If the option chosen is 'ovr', then a binary problem is fit for each\nlabel. For 'multinomial' the loss minimised is the multinomial loss fit\nacross the entire probability distribution, *even when the data is\nbinary*. 'multinomial' is unavailable when solver='liblinear'.\n'auto' selects 'ovr' if the data is binary, or if solver='liblinear',\nand otherwise selects 'multinomial'.\n\n.. versionadded:: 0.18\nStochastic Average Gradient descent solver for 'multinomial' case.\n.. versionchanged:: 0.22\nDefault changed from 'ovr' to 'auto' in 0.22.\n", + "name": "multi_class" + }, + { + "default": 0, + "description": "For the liblinear and lbfgs solvers set verbose to any positive\nnumber for verbosity.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": false, + "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nUseless for liblinear solver. See :term:`the Glossary `.\n\n.. versionadded:: 0.17\n*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": null, + "description": "Number of CPU cores used when parallelizing over classes if\nmulti_class='ovr'\". This parameter is ignored when the ``solver`` is\nset to 'liblinear' regardless of whether 'multi_class' is specified or\nnot. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors.\nSee :term:`Glossary ` for more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. 
Only\nused if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent\nto using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent\nto using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a\ncombination of L1 and L2.\n", + "name": "l1_ratio", + "type": "float32" + } + ], + "description": "\nLogistic Regression (aka logit, MaxEnt) classifier.\n\nIn the multiclass case, the training algorithm uses the one-vs-rest (OvR)\nscheme if the 'multi_class' option is set to 'ovr', and uses the\ncross-entropy loss if the 'multi_class' option is set to 'multinomial'.\n(Currently the 'multinomial' option is supported only by the 'lbfgs',\n'sag', 'saga' and 'newton-cg' solvers.)\n\nThis class implements regularized logistic regression using the\n'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note\nthat regularization is applied by default**. It can handle both dense\nand sparse input. Use C-ordered arrays or CSR matrices containing 64-bit\nfloats for optimal performance; any other input format will be converted\n(and copied).\n\nThe 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization\nwith primal formulation, or no regularization. The 'liblinear' solver\nsupports both L1 and L2 regularization, with a dual formulation only for\nthe L2 penalty. The Elastic-Net regularization is only supported by the\n'saga' solver.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.linear_model.LogisticRegression", + "schema": { + "attributes": [ + { + "default": "l2", + "description": "Used to specify the norm used in the penalization. The 'newton-cg',\n'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is\nonly supported by the 'saga' solver. If 'none' (not supported by the\nliblinear solver), no regularization is applied.\n\n.. 
versionadded:: 0.19\nl1 penalty with SAGA solver (allowing 'multinomial' + L1)\n", + "name": "penalty", + "option": "optional" + }, + { + "default": false, + "description": "Dual or primal formulation. Dual formulation is only implemented for\nl2 penalty with liblinear solver. Prefer dual=False when\nn_samples > n_features.\n", + "name": "dual", + "option": "optional", + "type": "boolean" + }, + { + "default": 0.0001, + "description": "Tolerance for stopping criteria.\n", + "name": "tol", + "option": "optional", + "type": "float32" + }, + { + "default": 1.0, + "description": "Inverse of regularization strength; must be a positive float.\nLike in support vector machines, smaller values specify stronger\nregularization.\n", + "name": "C", + "option": "optional", + "type": "float32" + }, + { + "default": true, + "description": "Specifies if a constant (a.k.a. bias or intercept) should be\nadded to the decision function.\n", + "name": "fit_intercept", + "option": "optional", + "type": "boolean" + }, + { + "default": 1.0, + "description": "Useful only when the solver 'liblinear' is used\nand self.fit_intercept is set to True. In this case, x becomes\n[x, self.intercept_scaling],\ni.e. a \"synthetic\" feature with constant value equal to\nintercept_scaling is appended to the instance vector.\nThe intercept becomes ``intercept_scaling * synthetic_feature_weight``.\n\nNote! 
the synthetic feature weight is subject to l1/l2 regularization\nas all other features.\nTo lessen the effect of regularization on synthetic feature weight\n(and therefore on the intercept) intercept_scaling has to be increased.\n", + "name": "intercept_scaling", + "option": "optional", + "type": "float32" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one.\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n\n.. versionadded:: 0.17\n*class_weight='balanced'*\n", + "name": "class_weight", + "option": "optional" + }, + { + "default": null, + "description": "Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the\ndata. See :term:`Glossary ` for details.\n", + "name": "random_state", + "option": "optional", + "type": "int32" + }, + { + "default": "lbfgs", + "description": "\nAlgorithm to use in the optimization problem.\n\n- For small datasets, 'liblinear' is a good choice, whereas 'sag' and\n'saga' are faster for large ones.\n- For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'\nhandle multinomial loss; 'liblinear' is limited to one-versus-rest\nschemes.\n- 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty\n- 'liblinear' and 'saga' also handle L1 penalty\n- 'saga' also supports 'elasticnet' penalty\n- 'liblinear' does not support setting ``penalty='none'``\n\nNote that 'sag' and 'saga' fast convergence is only guaranteed on\nfeatures with approximately the same scale. You can\npreprocess the data with a scaler from sklearn.preprocessing.\n\n.. versionadded:: 0.17\nStochastic Average Gradient descent solver.\n.. 
versionadded:: 0.19\nSAGA solver.\n.. versionchanged:: 0.22\nThe default solver changed from 'liblinear' to 'lbfgs' in 0.22.\n", + "name": "solver", + "option": "optional" + }, + { + "default": 100, + "description": "Maximum number of iterations taken for the solvers to converge.\n", + "name": "max_iter", + "option": "optional", + "type": "int32" + }, + { + "default": "auto", + "description": "If the option chosen is 'ovr', then a binary problem is fit for each\nlabel. For 'multinomial' the loss minimised is the multinomial loss fit\nacross the entire probability distribution, *even when the data is\nbinary*. 'multinomial' is unavailable when solver='liblinear'.\n'auto' selects 'ovr' if the data is binary, or if solver='liblinear',\nand otherwise selects 'multinomial'.\n\n.. versionadded:: 0.18\nStochastic Average Gradient descent solver for 'multinomial' case.\n.. versionchanged:: 0.22\nDefault changed from 'ovr' to 'auto' in 0.22.\n", + "name": "multi_class", + "option": "optional" + }, + { + "default": 0, + "description": "For the liblinear and lbfgs solvers set verbose to any positive\nnumber for verbosity.\n", + "name": "verbose", + "option": "optional", + "type": "int32" + }, + { + "default": false, + "description": "When set to True, reuse the solution of the previous call to fit as\ninitialization, otherwise, just erase the previous solution.\nUseless for liblinear solver. See :term:`the Glossary `.\n\n.. versionadded:: 0.17\n*warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.\n", + "name": "warm_start", + "option": "optional", + "type": "boolean" + }, + { + "default": null, + "description": "Number of CPU cores used when parallelizing over classes if\nmulti_class='ovr'\". This parameter is ignored when the ``solver`` is\nset to 'liblinear' regardless of whether 'multi_class' is specified or\nnot. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. 
``-1`` means using all processors.\nSee :term:`Glossary ` for more details.\n", + "name": "n_jobs", + "option": "optional", + "type": "int32" + }, + { + "default": null, + "description": "The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only\nused if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent\nto using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent\nto using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a\ncombination of L1 and L2.\n", + "name": "l1_ratio", + "option": "optional", + "type": "float32" + } + ], + "description": "\nLogistic Regression (aka logit, MaxEnt) classifier.\n\nIn the multiclass case, the training algorithm uses the one-vs-rest (OvR)\nscheme if the 'multi_class' option is set to 'ovr', and uses the\ncross-entropy loss if the 'multi_class' option is set to 'multinomial'.\n(Currently the 'multinomial' option is supported only by the 'lbfgs',\n'sag', 'saga' and 'newton-cg' solvers.)\n\nThis class implements regularized logistic regression using the\n'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note\nthat regularization is applied by default**. It can handle both dense\nand sparse input. Use C-ordered arrays or CSR matrices containing 64-bit\nfloats for optimal performance; any other input format will be converted\n(and copied).\n\nThe 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization\nwith primal formulation, or no regularization. The 'liblinear' solver\nsupports both L1 and L2 regularization, with a dual formulation only for\nthe L2 penalty. 
The Elastic-Net regularization is only supported by the\n'saga' solver.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.naive_bayes.BernoulliNB", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Additive (Laplace/Lidstone) smoothing parameter\n(0 for no smoothing).\n", + "name": "alpha", + "option": "optional", + "type": "float32" + }, + { + "default": "0.0", + "description": "Threshold for binarizing (mapping to booleans) of sample features.\nIf None, input is presumed to already consist of binary vectors.\n", + "name": "binarize", + "option": "optional" + }, + { + "default": true, + "description": "Whether to learn class prior probabilities or not.\nIf false, a uniform prior will be used.\n", + "name": "fit_prior", + "option": "optional", + "type": "boolean" + }, + { + "default": null, + "description": "Prior probabilities of the classes. If specified the priors are not\nadjusted according to the data.\n", + "name": "class_prior", + "option": "optional" + } + ], + "description": "Naive Bayes classifier for multivariate Bernoulli models.\n\nLike MultinomialNB, this classifier is suitable for discrete data. The\ndifference is that while MultinomialNB works with occurrence counts,\nBernoulliNB is designed for binary/boolean features.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.naive_bayes.ComplementNB", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).\n", + "name": "alpha", + "option": "optional", + "type": "float32" + }, + { + "default": true, + "description": "Only used in edge case with a single class in the training set.\n", + "name": "fit_prior", + "option": "optional", + "type": "boolean" + }, + { + "default": null, + "description": "Prior probabilities of the classes. 
Not used.\n", + "name": "class_prior", + "option": "optional" + }, + { + "default": false, + "description": "Whether or not a second normalization of the weights is performed. The\ndefault behavior mirrors the implementations found in Mahout and Weka,\nwhich do not follow the full algorithm described in Table 9 of the\npaper.\n", + "name": "norm", + "option": "optional", + "type": "boolean" + } + ], + "description": "The Complement Naive Bayes classifier described in Rennie et al. (2003).\n\nThe Complement Naive Bayes classifier was designed to correct the \"severe\nassumptions\" made by the standard Multinomial Naive Bayes classifier. It is\nparticularly suited for imbalanced data sets.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.20\n" + } + }, + { + "name": "sklearn.naive_bayes.MultinomialNB", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Additive (Laplace/Lidstone) smoothing parameter\n(0 for no smoothing).\n", + "name": "alpha", + "option": "optional", + "type": "float32" + }, + { + "default": true, + "description": "Whether to learn class prior probabilities or not.\nIf false, a uniform prior will be used.\n", + "name": "fit_prior", + "option": "optional", + "type": "boolean" + }, + { + "default": null, + "description": "Prior probabilities of the classes. If specified the priors are not\nadjusted according to the data.\n", + "name": "class_prior", + "option": "optional" + } + ], + "description": "\nNaive Bayes classifier for multinomial models\n\nThe multinomial Naive Bayes classifier is suitable for classification with\ndiscrete features (e.g., word counts for text classification). The\nmultinomial distribution normally requires integer feature counts. 
However,\nin practice, fractional counts such as tf-idf may also work.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.neighbors.KNeighborsClassifier", + "schema": { + "attributes": [ + { + "default": 5, + "description": "Number of neighbors to use by default for :meth:`kneighbors` queries.\n", + "name": "n_neighbors", + "option": "optional", + "type": "int32" + }, + { + "default": "uniform", + "description": "weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. All points in each neighborhood\nare weighted equally.\n- 'distance' : weight points by the inverse of their distance.\nin this case, closer neighbors of a query point will have a\ngreater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\narray of distances, and returns an array of the same shape\ncontaining the weights.\n", + "name": "weights", + "option": "optional" + }, + { + "default": "auto", + "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\nbased on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force.\n", + "name": "algorithm", + "option": "optional" + }, + { + "default": 30, + "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem.\n", + "name": "leaf_size", + "option": "optional", + "type": "int32" + }, + { + "default": 2, + "description": "Power parameter for the Minkowski metric. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. 
For arbitrary p, minkowski_distance (l_p) is used.\n", + "name": "p", + "option": "optional", + "type": "int32" + }, + { + "default": "minkowski", + "description": "the distance metric to use for the tree. The default metric is\nminkowski, and with p=2 is equivalent to the standard Euclidean\nmetric. See the documentation of :class:`DistanceMetric` for a\nlist of available metrics.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. X may be a :term:`sparse graph`,\nin which case only \"nonzero\" elements may be considered neighbors.\n", + "name": "metric" + }, + { + "default": null, + "description": "Additional keyword arguments for the metric function.\n", + "name": "metric_params", + "option": "optional" + }, + { + "default": null, + "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\nDoesn't affect :meth:`fit` method.\n", + "name": "n_jobs", + "option": "optional", + "type": "int32" + } + ], + "description": "Classifier implementing the k-nearest neighbors vote.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.neighbors.KNeighborsRegressor", + "schema": { + "attributes": [ + { + "default": 5, + "description": "Number of neighbors to use by default for :meth:`kneighbors` queries.\n", + "name": "n_neighbors", + "option": "optional", + "type": "int32" + }, + { + "description": "weight function used in prediction. Possible values:\n\n- 'uniform' : uniform weights. 
All points in each neighborhood\nare weighted equally.\n- 'distance' : weight points by the inverse of their distance.\nin this case, closer neighbors of a query point will have a\ngreater influence than neighbors which are further away.\n- [callable] : a user-defined function which accepts an\narray of distances, and returns an array of the same shape\ncontaining the weights.\n\nUniform weights are used by default.\n", + "name": "weights" + }, + { + "default": "auto", + "description": "Algorithm used to compute the nearest neighbors:\n\n- 'ball_tree' will use :class:`BallTree`\n- 'kd_tree' will use :class:`KDTree`\n- 'brute' will use a brute-force search.\n- 'auto' will attempt to decide the most appropriate algorithm\nbased on the values passed to :meth:`fit` method.\n\nNote: fitting on sparse input will override the setting of\nthis parameter, using brute force.\n", + "name": "algorithm", + "option": "optional" + }, + { + "default": 30, + "description": "Leaf size passed to BallTree or KDTree. This can affect the\nspeed of the construction and query, as well as the memory\nrequired to store the tree. The optimal value depends on the\nnature of the problem.\n", + "name": "leaf_size", + "option": "optional", + "type": "int32" + }, + { + "default": 2, + "description": "Power parameter for the Minkowski metric. When p = 1, this is\nequivalent to using manhattan_distance (l1), and euclidean_distance\n(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.\n", + "name": "p", + "option": "optional", + "type": "int32" + }, + { + "default": "minkowski", + "description": "the distance metric to use for the tree. The default metric is\nminkowski, and with p=2 is equivalent to the standard Euclidean\nmetric. See the documentation of :class:`DistanceMetric` for a\nlist of available metrics.\nIf metric is \"precomputed\", X is assumed to be a distance matrix and\nmust be square during fit. 
X may be a :term:`sparse graph`,\nin which case only \"nonzero\" elements may be considered neighbors.\n", + "name": "metric" + }, + { + "default": null, + "description": "Additional keyword arguments for the metric function.\n", + "name": "metric_params", + "option": "optional" + }, + { + "default": null, + "description": "The number of parallel jobs to run for neighbors search.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\nDoesn't affect :meth:`fit` method.\n", + "name": "n_jobs", + "option": "optional", + "type": "int32" + } + ], + "description": "Regression based on k-nearest neighbors.\n\nThe target is predicted by local interpolation of the targets\nassociated of the nearest neighbors in the training set.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.9\n" + } + }, + { + "name": "sklearn.linear_model.LassoLars", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "Constant that multiplies the penalty term. Defaults to 1.0.\n``alpha = 0`` is equivalent to an ordinary least square, solved\nby :class:`LinearRegression`. For numerical reasons, using\n``alpha = 0`` with the LassoLars object is not advised and you\nshould prefer the LinearRegression object.\n", + "name": "alpha", + "type": "float32" + }, + { + "default": true, + "description": "whether to calculate the intercept for this model. If set\nto false, no intercept will be used in calculations\n(i.e. 
data is expected to be centered).\n", + "name": "fit_intercept", + "type": "boolean" + }, + { + "default": "False", + "description": "Sets the verbosity amount\n", + "name": "verbose", + "option": "optional" + }, + { + "default": true, + "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``\non an estimator with ``normalize=False``.\n", + "name": "normalize", + "option": "optional", + "type": "boolean" + }, + { + "default": "auto", + "description": "Whether to use a precomputed Gram matrix to speed up\ncalculations. If set to ``'auto'`` let us decide. The Gram\nmatrix can also be passed as argument.\n", + "name": "precompute", + "type": "boolean" + }, + { + "default": 500, + "description": "Maximum number of iterations to perform.\n", + "name": "max_iter", + "option": "optional", + "type": "int32" + }, + { + "description": "The machine-precision regularization in the computation of the\nCholesky diagonal factors. Increase this for very ill-conditioned\nsystems. 
Unlike the ``tol`` parameter in some iterative\noptimization-based algorithms, this parameter does not control\nthe tolerance of the optimization.\nBy default, ``np.finfo(np.float).eps`` is used.\n", + "name": "eps", + "option": "optional", + "type": "float32" + }, + { + "default": true, + "description": "If True, X will be copied; else, it may be overwritten.\n", + "name": "copy_X", + "option": "optional", + "type": "boolean" + }, + { + "default": true, + "description": "If ``True`` the full path is stored in the ``coef_path_`` attribute.\nIf you compute the solution for a large problem or many targets,\nsetting ``fit_path`` to ``False`` will lead to a speedup, especially\nwith a small alpha.\n", + "name": "fit_path", + "type": "boolean" + }, + { + "default": false, + "description": "Restrict coefficients to be >= 0. Be aware that you might want to\nremove fit_intercept which is set True by default.\nUnder the positive restriction the model coefficients will not converge\nto the ordinary-least-squares solution for small values of alpha.\nOnly coefficients up to the smallest alpha value (``alphas_[alphas_ >\n0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso\nalgorithm are typically in congruence with the solution of the\ncoordinate descent Lasso estimator.\n", + "name": "positive", + "type": "boolean" + }, + { + "default": null, + "description": "Upper bound on a uniform noise parameter to be added to the\n`y` values, to satisfy the model's assumption of\none-at-a-time computations. Might help with stability.\n", + "name": "jitter", + "type": "float32" + }, + { + "description": "Determines random number generation for jittering. Pass an int\nfor reproducible output across multiple function calls.\nSee :term:`Glossary `. Ignored if `jitter` is None.\n", + "name": "random_state" + } + ], + "description": "Lasso model fit with Least Angle Regression a.k.a. 
Lars\n\nIt is a Linear Model trained with an L1 prior as regularizer.\n\nThe optimization objective for Lasso is::\n\n(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.decomposition._pca.PCA", + "schema": { + "attributes": [ + { + "description": "Number of components to keep.\nif n_components is not set all components are kept::\n\nn_components == min(n_samples, n_features)\n\nIf ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's\nMLE is used to guess the dimension. Use of ``n_components == 'mle'``\nwill interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\nIf ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\nnumber of components such that the amount of variance that needs to be\nexplained is greater than the percentage specified by n_components.\n\nIf ``svd_solver == 'arpack'``, the number of components must be\nstrictly less than the minimum of n_features and n_samples.\n\nHence, the None case results in::\n\nn_components == min(n_samples, n_features) - 1\n", + "name": "n_components" + }, + { + "default": true, + "description": "If False, data passed to fit are overwritten and running\nfit(X).transform(X) will not yield the expected results,\nuse fit_transform(X) instead.\n", + "name": "copy", + "type": "boolean" + }, + { + "default": false, + "description": "When True (False by default) the `components_` vectors are multiplied\nby the square root of n_samples and then divided by the singular values\nto ensure uncorrelated outputs with unit component-wise variances.\n\nWhitening will remove some information from the transformed signal\n(the relative variance scales of the components) but can sometime\nimprove the predictive accuracy of the downstream estimators by\nmaking their data respect some hard-wired assumptions.\n", + "name": "whiten", + "option": "optional", + "type": "boolean" + }, + { + "description": "If auto :\nThe solver is 
selected by a default policy based on `X.shape` and\n`n_components`: if the input data is larger than 500x500 and the\nnumber of components to extract is lower than 80% of the smallest\ndimension of the data, then the more efficient 'randomized'\nmethod is enabled. Otherwise the exact full SVD is computed and\noptionally truncated afterwards.\nIf full :\nrun exact full SVD calling the standard LAPACK solver via\n`scipy.linalg.svd` and select the components by postprocessing\nIf arpack :\nrun SVD truncated to n_components calling ARPACK solver via\n`scipy.sparse.linalg.svds`. It requires strictly\n0 < n_components < min(X.shape)\nIf randomized :\nrun randomized SVD by the method of Halko et al.\n\n.. versionadded:: 0.18.0\n", + "name": "svd_solver" + }, + { + "default": ".0", + "description": "Tolerance for singular values computed by svd_solver == 'arpack'.\n\n.. versionadded:: 0.18.0\n", + "name": "tol", + "option": "optional" + }, + { + "default": "auto", + "description": "Number of iterations for the power method computed by\nsvd_solver == 'randomized'.\n\n.. versionadded:: 0.18.0\n", + "name": "iterated_power" + }, + { + "default": null, + "description": "Used when ``svd_solver`` == 'arpack' or 'randomized'. Pass an int\nfor reproducible results across multiple function calls.\nSee :term:`Glossary `.\n\n.. versionadded:: 0.18.0\n", + "name": "random_state", + "type": "int32" + } + ], + "description": "Principal component analysis (PCA).\n\nLinear dimensionality reduction using Singular Value Decomposition of the\ndata to project it to a lower dimensional space. The input data is centered\nbut not scaled for each feature before applying the SVD.\n\nIt uses the LAPACK implementation of the full SVD or a randomized truncated\nSVD by the method of Halko et al. 
2009, depending on the shape of the input\ndata and the number of components to extract.\n\nIt can also use the scipy.sparse.linalg ARPACK implementation of the\ntruncated SVD.\n\nNotice that this class does not support sparse input. See\n:class:`TruncatedSVD` for an alternative with sparse data.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.decomposition.PCA", + "schema": { + "attributes": [ + { + "description": "Number of components to keep.\nif n_components is not set all components are kept::\n\nn_components == min(n_samples, n_features)\n\nIf ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's\nMLE is used to guess the dimension. Use of ``n_components == 'mle'``\nwill interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.\n\nIf ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the\nnumber of components such that the amount of variance that needs to be\nexplained is greater than the percentage specified by n_components.\n\nIf ``svd_solver == 'arpack'``, the number of components must be\nstrictly less than the minimum of n_features and n_samples.\n\nHence, the None case results in::\n\nn_components == min(n_samples, n_features) - 1\n", + "name": "n_components" + }, + { + "default": true, + "description": "If False, data passed to fit are overwritten and running\nfit(X).transform(X) will not yield the expected results,\nuse fit_transform(X) instead.\n", + "name": "copy", + "type": "boolean" + }, + { + "default": false, + "description": "When True (False by default) the `components_` vectors are multiplied\nby the square root of n_samples and then divided by the singular values\nto ensure uncorrelated outputs with unit component-wise variances.\n\nWhitening will remove some information from the transformed signal\n(the relative variance scales of the components) but can sometime\nimprove the predictive accuracy of the downstream estimators by\nmaking their data respect some hard-wired assumptions.\n", + 
"name": "whiten", + "option": "optional", + "type": "boolean" + }, + { + "description": "If auto :\nThe solver is selected by a default policy based on `X.shape` and\n`n_components`: if the input data is larger than 500x500 and the\nnumber of components to extract is lower than 80% of the smallest\ndimension of the data, then the more efficient 'randomized'\nmethod is enabled. Otherwise the exact full SVD is computed and\noptionally truncated afterwards.\nIf full :\nrun exact full SVD calling the standard LAPACK solver via\n`scipy.linalg.svd` and select the components by postprocessing\nIf arpack :\nrun SVD truncated to n_components calling ARPACK solver via\n`scipy.sparse.linalg.svds`. It requires strictly\n0 < n_components < min(X.shape)\nIf randomized :\nrun randomized SVD by the method of Halko et al.\n\n.. versionadded:: 0.18.0\n", + "name": "svd_solver", + "type": "string" + }, + { + "default": ".0", + "description": "Tolerance for singular values computed by svd_solver == 'arpack'.\n\n.. versionadded:: 0.18.0\n", + "name": "tol", + "option": "optional" + }, + { + "default": "auto", + "description": "Number of iterations for the power method computed by\nsvd_solver == 'randomized'.\n\n.. versionadded:: 0.18.0\n", + "name": "iterated_power" + }, + { + "default": null, + "description": "Used when ``svd_solver`` == 'arpack' or 'randomized'. Pass an int\nfor reproducible results across multiple function calls.\nSee :term:`Glossary `.\n\n.. versionadded:: 0.18.0\n", + "name": "random_state", + "option": "optional", + "type": "int32" + } + ], + "description": "Principal component analysis (PCA).\n\nLinear dimensionality reduction using Singular Value Decomposition of the\ndata to project it to a lower dimensional space. The input data is centered\nbut not scaled for each feature before applying the SVD.\n\nIt uses the LAPACK implementation of the full SVD or a randomized truncated\nSVD by the method of Halko et al. 
2009, depending on the shape of the input\ndata and the number of components to extract.\n\nIt can also use the scipy.sparse.linalg ARPACK implementation of the\ntruncated SVD.\n\nNotice that this class does not support sparse input. See\n:class:`TruncatedSVD` for an alternative with sparse data.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.calibration.CalibratedClassifierCV", + "schema": { + "attributes": [ + { + "description": "The classifier whose output need to be calibrated to provide more\naccurate `predict_proba` outputs.\n", + "name": "base_estimator" + }, + { + "description": "The method to use for calibration. Can be 'sigmoid' which\ncorresponds to Platt's method (i.e. a logistic regression model) or\n'isotonic' which is a non-parametric approach. It is not advised to\nuse isotonic calibration with too few calibration samples\n``(<<1000)`` since it tends to overfit.\n", + "name": "method" + }, + { + "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross-validation,\n- integer, to specify the number of folds.\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if ``y`` is binary or multiclass,\n:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y`` is\nneither binary nor multiclass, :class:`sklearn.model_selection.KFold`\nis used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\nIf \"prefit\" is passed, it is assumed that `base_estimator` has been\nfitted already and all data is used for calibration.\n\n.. 
versionchanged:: 0.22\n``cv`` default value if None changed from 3-fold to 5-fold.\n", + "name": "cv", + "option": "optional", + "type": "int32" + } + ], + "description": "Probability calibration with isotonic regression or logistic regression.\n\nThe calibration is based on the :term:`decision_function` method of the\n`base_estimator` if it exists, else on :term:`predict_proba`.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.feature_extraction.text.CountVectorizer", + "schema": { + "attributes": [ + { + "default": "content", + "description": "If 'filename', the sequence passed as an argument to fit is\nexpected to be a list of filenames that need reading to fetch\nthe raw content to analyze.\n\nIf 'file', the sequence items must have a 'read' method (file-like\nobject) that is called to fetch the bytes in memory.\n\nOtherwise the input is expected to be a sequence of items that\ncan be of type string or byte.\n", + "name": "input", + "type": "string" + }, + { + "default": "utf-8", + "description": "If bytes or files are given to analyze, this encoding is used to\ndecode.\n", + "name": "encoding", + "type": "string" + }, + { + "default": "strict", + "description": "Instruction on what to do if a byte sequence is given to analyze that\ncontains characters not of the given `encoding`. By default, it is\n'strict', meaning that a UnicodeDecodeError will be raised. 
Other\nvalues are 'ignore' and 'replace'.\n", + "name": "decode_error" + }, + { + "default": null, + "description": "Remove accents and perform other character normalization\nduring the preprocessing step.\n'ascii' is a fast method that only works on characters that have\nan direct ASCII mapping.\n'unicode' is a slightly slower method that works on any characters.\nNone (default) does nothing.\n\nBoth 'ascii' and 'unicode' use NFKD normalization from\n:func:`unicodedata.normalize`.\n", + "name": "strip_accents" + }, + { + "default": true, + "description": "Convert all characters to lowercase before tokenizing.\n", + "name": "lowercase", + "type": "boolean" + }, + { + "default": null, + "description": "Override the preprocessing (string transformation) stage while\npreserving the tokenizing and n-grams generation steps.\nOnly applies if ``analyzer is not callable``.\n", + "name": "preprocessor" + }, + { + "default": null, + "description": "Override the string tokenization step while preserving the\npreprocessing and n-grams generation steps.\nOnly applies if ``analyzer == 'word'``.\n", + "name": "tokenizer" + }, + { + "default": "None", + "description": "If 'english', a built-in stop word list for English is used.\nThere are several known issues with 'english' and you should\nconsider an alternative (see :ref:`stop_words`).\n\nIf a list, that list is assumed to contain stop words, all of which\nwill be removed from the resulting tokens.\nOnly applies if ``analyzer == 'word'``.\n\nIf None, no stop words will be used. max_df can be set to a value\nin the range [0.7, 1.0) to automatically detect and filter stop\nwords based on intra corpus document frequency of terms.\n", + "name": "stop_words", + "type": "string" + }, + { + "description": "Regular expression denoting what constitutes a \"token\", only used\nif ``analyzer == 'word'``. 
The default regexp select tokens of 2\nor more alphanumeric characters (punctuation is completely ignored\nand always treated as a token separator).\n", + "name": "token_pattern", + "type": "string" + }, + { + "default": "(1, 1)", + "description": "The lower and upper boundary of the range of n-values for different\nword n-grams or char n-grams to be extracted. All values of n such\nsuch that min_n <= n <= max_n will be used. For example an\n``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means\nunigrams and bigrams, and ``(2, 2)`` means only bigrams.\nOnly applies if ``analyzer is not callable``.\n", + "name": "ngram_range" + }, + { + "default": "word", + "description": "Whether the feature should be made of word n-gram or character\nn-grams.\nOption 'char_wb' creates character n-grams only from text inside\nword boundaries; n-grams at the edges of words are padded with space.\n\nIf a callable is passed it is used to extract the sequence of features\nout of the raw, unprocessed input.\n\n.. versionchanged:: 0.21\n\nSince v0.21, if ``input`` is ``filename`` or ``file``, the data is\nfirst read from the file and then passed to the given callable\nanalyzer.\n", + "name": "analyzer", + "type": "string" + }, + { + "default": "1.0", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly higher than the given threshold (corpus-specific\nstop words).\nIf float, the parameter represents a proportion of documents, integer\nabsolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_df" + }, + { + "default": "1", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly lower than the given threshold. 
This value is also\ncalled cut-off in the literature.\nIf float, the parameter represents a proportion of documents, integer\nabsolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "min_df" + }, + { + "default": null, + "description": "If not None, build a vocabulary that only consider the top\nmax_features ordered by term frequency across the corpus.\n\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_features", + "type": "int32" + }, + { + "default": null, + "description": "Either a Mapping (e.g., a dict) where keys are terms and values are\nindices in the feature matrix, or an iterable over terms. If not\ngiven, a vocabulary is determined from the input documents. Indices\nin the mapping should not be repeated and should not have any gap\nbetween 0 and the largest index.\n", + "name": "vocabulary", + "option": "optional" + }, + { + "default": false, + "description": "If True, all non zero counts are set to 1. This is useful for discrete\nprobabilistic models that model binary events rather than integer\ncounts.\n", + "name": "binary", + "type": "boolean" + }, + { + "default": "np.int64", + "description": "Type of the matrix returned by fit_transform() or transform().\n", + "name": "dtype", + "option": "optional" + } + ], + "description": "Convert a collection of text documents to a matrix of token counts\n\nThis implementation produces a sparse representation of the counts using\nscipy.sparse.csr_matrix.\n\nIf you do not provide an a-priori dictionary and you do not use an analyzer\nthat does some kind of feature selection then the number of features will\nbe equal to the vocabulary size found by analyzing the data.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.feature_extraction.text.TfidfVectorizer", + "schema": { + "attributes": [ + { + "default": "content", + "description": "If 'filename', the sequence passed as an argument to fit is\nexpected to be a list of filenames that 
need reading to fetch\nthe raw content to analyze.\n\nIf 'file', the sequence items must have a 'read' method (file-like\nobject) that is called to fetch the bytes in memory.\n\nOtherwise the input is expected to be a sequence of items that\ncan be of type string or byte.\n", + "name": "input", + "type": "string" + }, + { + "default": "utf-8", + "description": "If bytes or files are given to analyze, this encoding is used to\ndecode.\n", + "name": "encoding", + "type": "string" + }, + { + "default": "strict", + "description": "Instruction on what to do if a byte sequence is given to analyze that\ncontains characters not of the given `encoding`. By default, it is\n'strict', meaning that a UnicodeDecodeError will be raised. Other\nvalues are 'ignore' and 'replace'.\n", + "name": "decode_error" + }, + { + "default": null, + "description": "Remove accents and perform other character normalization\nduring the preprocessing step.\n'ascii' is a fast method that only works on characters that have\nan direct ASCII mapping.\n'unicode' is a slightly slower method that works on any characters.\nNone (default) does nothing.\n\nBoth 'ascii' and 'unicode' use NFKD normalization from\n:func:`unicodedata.normalize`.\n", + "name": "strip_accents" + }, + { + "default": true, + "description": "Convert all characters to lowercase before tokenizing.\n", + "name": "lowercase", + "type": "boolean" + }, + { + "default": null, + "description": "Override the preprocessing (string transformation) stage while\npreserving the tokenizing and n-grams generation steps.\nOnly applies if ``analyzer is not callable``.\n", + "name": "preprocessor" + }, + { + "default": null, + "description": "Override the string tokenization step while preserving the\npreprocessing and n-grams generation steps.\nOnly applies if ``analyzer == 'word'``.\n", + "name": "tokenizer" + }, + { + "description": "Whether the feature should be made of word or character n-grams.\nOption 'char_wb' creates character n-grams only 
from text inside\nword boundaries; n-grams at the edges of words are padded with space.\n\nIf a callable is passed it is used to extract the sequence of features\nout of the raw, unprocessed input.\n\n.. versionchanged:: 0.21\n\nSince v0.21, if ``input`` is ``filename`` or ``file``, the data is\nfirst read from the file and then passed to the given callable\nanalyzer.\n", + "name": "analyzer" + }, + { + "default": null, + "description": "If a string, it is passed to _check_stop_list and the appropriate stop\nlist is returned. 'english' is currently the only supported string\nvalue.\nThere are several known issues with 'english' and you should\nconsider an alternative (see :ref:`stop_words`).\n\nIf a list, that list is assumed to contain stop words, all of which\nwill be removed from the resulting tokens.\nOnly applies if ``analyzer == 'word'``.\n\nIf None, no stop words will be used. max_df can be set to a value\nin the range [0.7, 1.0) to automatically detect and filter stop\nwords based on intra corpus document frequency of terms.\n", + "name": "stop_words" + }, + { + "description": "Regular expression denoting what constitutes a \"token\", only used\nif ``analyzer == 'word'``. The default regexp selects tokens of 2\nor more alphanumeric characters (punctuation is completely ignored\nand always treated as a token separator).\n", + "name": "token_pattern", + "type": "string" + }, + { + "default": "(1, 1)", + "description": "The lower and upper boundary of the range of n-values for different\nn-grams to be extracted. All values of n such that min_n <= n <= max_n\nwill be used. 
For example an ``ngram_range`` of ``(1, 1)`` means only\nunigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means\nonly bigrams.\nOnly applies if ``analyzer is not callable``.\n", + "name": "ngram_range" + }, + { + "default": "1.0", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly higher than the given threshold (corpus-specific\nstop words).\nIf float in range [0.0, 1.0], the parameter represents a proportion of\ndocuments, integer absolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_df" + }, + { + "default": "1", + "description": "When building the vocabulary ignore terms that have a document\nfrequency strictly lower than the given threshold. This value is also\ncalled cut-off in the literature.\nIf float in range of [0.0, 1.0], the parameter represents a proportion\nof documents, integer absolute counts.\nThis parameter is ignored if vocabulary is not None.\n", + "name": "min_df" + }, + { + "default": null, + "description": "If not None, build a vocabulary that only consider the top\nmax_features ordered by term frequency across the corpus.\n\nThis parameter is ignored if vocabulary is not None.\n", + "name": "max_features", + "type": "int32" + }, + { + "default": null, + "description": "Either a Mapping (e.g., a dict) where keys are terms and values are\nindices in the feature matrix, or an iterable over terms. If not\ngiven, a vocabulary is determined from the input documents.\n", + "name": "vocabulary", + "option": "optional" + }, + { + "default": false, + "description": "If True, all non-zero term counts are set to 1. This does not mean\noutputs will have only 0/1 values, only that the tf term in tf-idf\nis binary. 
(Set idf and normalization to False to get 0/1 outputs).\n", + "name": "binary", + "type": "boolean" + }, + { + "default": "float64", + "description": "Type of the matrix returned by fit_transform() or transform().\n", + "name": "dtype", + "option": "optional" + }, + { + "default": "l2", + "description": "Each output row will have unit norm, either:\n* 'l2': Sum of squares of vector elements is 1. The cosine\nsimilarity between two vectors is their dot product when l2 norm has\nbeen applied.\n* 'l1': Sum of absolute values of vector elements is 1.\nSee :func:`preprocessing.normalize`.\n", + "name": "norm" + }, + { + "default": true, + "description": "Enable inverse-document-frequency reweighting.\n", + "name": "use_idf", + "type": "boolean" + }, + { + "default": true, + "description": "Smooth idf weights by adding one to document frequencies, as if an\nextra document was seen containing every term in the collection\nexactly once. Prevents zero divisions.\n", + "name": "smooth_idf", + "type": "boolean" + }, + { + "default": false, + "description": "Apply sublinear tf scaling, i.e. 
replace tf with 1 + log(tf).\n", + "name": "sublinear_tf", + "type": "boolean" + } + ], + "description": "Convert a collection of raw documents to a matrix of TF-IDF features.\n\nEquivalent to :class:`CountVectorizer` followed by\n:class:`TfidfTransformer`.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "lightgbm.sklearn.LGBMRegressor", + "schema": { + "attributes": [ + { + "default": "gbdt", + "name": "boosting_type", + "type": "string" + }, + { + "default": null, + "name": "class_weight" + }, + { + "default": 1.0, + "name": "colsample_bytree" + }, + { + "default": 0.05, + "name": "learning_rate" + }, + { + "default": -1, + "name": "max_depth" + }, + { + "default": 20, + "name": "min_child_samples" + }, + { + "default": 0.001, + "name": "min_child_weight" + }, + { + "default": 0.0, + "name": "min_split_gain" + }, + { + "default": 100, + "name": "n_estimators" + }, + { + "default": -1, + "name": "n_jobs" + }, + { + "default": 31, + "name": "num_leaves" + }, + { + "default": null, + "name": "random_state" + }, + { + "default": 0, + "name": "reg_alpha" + }, + { + "default": 0, + "name": "reg_lambda" + }, + { + "default": true, + "name": "silent", + "type": "boolean" + }, + { + "default": 200000, + "name": "subsample_for_bin" + }, + { + "default": 0, + "name": "subsample_freq" + }, + { + "default": 1.0, + "name": "subsample" + } + ] + } + }, + { + "name": "lightgbm.sklearn.LGBMClassifier", + "schema": { + "attributes": [ + { + "default": "gbdt", + "name": "boosting_type", + "type": "string" + }, + { + "default": null, + "name": "class_weight" + }, + { + "default": 1.0, + "name": "colsample_bytree" + }, + { + "default": 0.05, + "name": "learning_rate" + }, + { + "default": -1, + "name": "max_depth" + }, + { + "default": 20, + "name": "min_child_samples" + }, + { + "default": 0.001, + "name": "min_child_weight" + }, + { + "default": 0.0, + "name": "min_split_gain" + }, + { + "default": 100, + "name": "n_estimators" + }, + { + "default": -1, + "name": 
"n_jobs" + }, + { + "default": 31, + "name": "num_leaves" + }, + { + "default": null, + "name": "random_state" + }, + { + "default": 0, + "name": "reg_alpha" + }, + { + "default": 0, + "name": "reg_lambda" + }, + { + "default": true, + "name": "silent", + "type": "boolean" + }, + { + "default": 200000, + "name": "subsample_for_bin" + }, + { + "default": 0, + "name": "subsample_freq" + }, + { + "default": 1.0, + "name": "subsample" + } + ] + } + }, + { + "name": "lightgbm.basic.Booster", + "schema": { + "attributes": [ + { + "default": -1, + "name": "best_iteration" + }, + { + "default": false, + "name": "network" + }, + { + "default": null, + "name": "train_set" + }, + { + "default": false, + "name": "stride" + }, + { + "default": null, + "name": "model_file" + }, + { + "default": null, + "name": "params" + }, + { + "default": null, + "name": "pandas_categorical" + } + ] + } + }, + { + "name": "sklearn.linear_model.LinearRegression", + "schema": { + "attributes": [ + { + "default": true, + "description": "Whether to calculate the intercept for this model. If set\nto False, no intercept will be used in calculations\n(i.e. data is expected to be centered).\n", + "name": "fit_intercept", + "option": "optional", + "type": "boolean" + }, + { + "default": false, + "description": "This parameter is ignored when ``fit_intercept`` is set to False.\nIf True, the regressors X will be normalized before regression by\nsubtracting the mean and dividing by the l2-norm.\nIf you wish to standardize, please use\n:class:`sklearn.preprocessing.StandardScaler` before calling ``fit`` on\nan estimator with ``normalize=False``.\n", + "name": "normalize", + "option": "optional", + "type": "boolean" + }, + { + "default": true, + "description": "If True, X will be copied; else, it may be overwritten.\n", + "name": "copy_X", + "option": "optional", + "type": "boolean" + }, + { + "default": null, + "description": "The number of jobs to use for the computation. 
This will only provide\nspeedup for n_targets > 1 and sufficient large problems.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n", + "name": "n_jobs", + "option": "optional", + "type": "int32" + } + ], + "description": "\nOrdinary least squares Linear Regression.\n\nLinearRegression fits a linear model with coefficients w = (w1, ..., wp)\nto minimize the residual sum of squares between the observed targets in\nthe dataset, and the targets predicted by the linear approximation.\n" + } + }, + { + "name": "sklearn.pipeline.FeatureUnion", + "schema": { + "attributes": [ + { + "description": "List of transformer objects to be applied to the data. The first\nhalf of each tuple is the name of the transformer.\n\n.. versionchanged:: 0.22\nDeprecated `None` as a transformer in favor of 'drop'.\n", + "name": "transformer_list" + }, + { + "default": null, + "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionchanged:: v0.20\n`n_jobs` default changed from 1 to None\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Multiplicative weights for features per transformer.\nKeys are transformer names, values the weights.\n", + "name": "transformer_weights" + }, + { + "default": false, + "description": "If True, the time elapsed while fitting each transformer will be\nprinted as it is completed.\n", + "name": "verbose", + "type": "boolean" + } + ], + "description": "Concatenates results of multiple transformer objects.\n\nThis estimator applies a list of transformer objects in parallel to the\ninput data, then concatenates the results. 
This is useful to combine\nseveral feature extraction mechanisms into a single transformer.\n\nParameters of the transformers may be set using its name and the parameter\nname separated by a '__'. A transformer may be replaced entirely by\nsetting the parameter with its name to another transformer,\nor removed by setting to 'drop'.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.13\n" + } + }, + { + "name": "sklearn.compose._column_transformer.ColumnTransformer", + "schema": { + "attributes": [ + { + "description": "List of (name, transformer, columns) tuples specifying the\ntransformer objects to be applied to subsets of the data.\n\nname : str\nLike in Pipeline and FeatureUnion, this allows the transformer and\nits parameters to be set using ``set_params`` and searched in grid\nsearch.\ntransformer : {'drop', 'passthrough'} or estimator\nEstimator must support :term:`fit` and :term:`transform`.\nSpecial-cased strings 'drop' and 'passthrough' are accepted as\nwell, to indicate to drop the columns or to pass them through\nuntransformed, respectively.\ncolumns : str, array-like of str, int, array-like of int, array-like of bool, slice or callable\nIndexes the data on its second axis. Integers are interpreted as\npositional columns, while strings can reference DataFrame columns\nby name. A scalar string or int should be used where\n``transformer`` expects X to be a 1d array-like (vector),\notherwise a 2d array will be passed to the transformer.\nA callable is passed the input data `X` and can return any of the\nabove. To select multiple columns by name or dtype, you can use\n:obj:`make_column_selector`.\n", + "name": "transformers" + }, + { + "description": "By default, only the specified columns in `transformers` are\ntransformed and combined in the output, and the non-specified\ncolumns are dropped. 
(default of ``'drop'``).\nBy specifying ``remainder='passthrough'``, all remaining columns that\nwere not specified in `transformers` will be automatically passed\nthrough. This subset of columns is concatenated with the output of\nthe transformers.\nBy setting ``remainder`` to be an estimator, the remaining\nnon-specified columns will use the ``remainder`` estimator. The\nestimator must support :term:`fit` and :term:`transform`.\nNote that using this feature requires that the DataFrame columns\ninput at :term:`fit` and :term:`transform` have identical order.\n", + "name": "remainder" + }, + { + "default": 0.3, + "description": "If the output of the different transformers contains sparse matrices,\nthese will be stacked as a sparse matrix if the overall density is\nlower than this value. Use ``sparse_threshold=0`` to always return\ndense. When the transformed output consists of all dense data, the\nstacked result will be dense, and this keyword will be ignored.\n", + "name": "sparse_threshold", + "type": "float32" + }, + { + "default": null, + "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Multiplicative weights for features per transformer. The output of the\ntransformer is multiplied by these weights. 
Keys are transformer names,\nvalues the weights.\n", + "name": "transformer_weights" + }, + { + "default": false, + "description": "If True, the time elapsed while fitting each transformer will be\nprinted as it is completed.\n", + "name": "verbose", + "type": "boolean" + } + ], + "description": "Applies transformers to columns of an array or pandas DataFrame.\n\nThis estimator allows different columns or column subsets of the input\nto be transformed separately and the features generated by each transformer\nwill be concatenated to form a single feature space.\nThis is useful for heterogeneous or columnar data, to combine several\nfeature extraction mechanisms or transformations into a single transformer.\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.20\n" + } + }, + { + "name": "sklearn.preprocessing._encoders.OneHotEncoder", + "schema": { + "attributes": [ + { + "description": "Categories (unique values) per feature:\n\n- 'auto' : Determine categories automatically from the training data.\n- list : ``categories[i]`` holds the categories expected in the ith\ncolumn. The passed categories should not mix strings and numeric\nvalues within a single feature, and should be sorted in case of\nnumeric values.\n\nThe used categories can be found in the ``categories_`` attribute.\n\n.. versionadded:: 0.20\n", + "name": "categories" + }, + { + "description": "Specifies a methodology to use to drop one of the categories per\nfeature. This is useful in situations where perfectly collinear\nfeatures cause problems, such as when feeding the resulting data\ninto a neural network or an unregularized regression.\n\nHowever, dropping one category breaks the symmetry of the original\nrepresentation and can therefore induce a bias in downstream models,\nfor instance for penalized linear classification or regression models.\n\n- None : retain all features (the default).\n- 'first' : drop the first category in each feature. 
If only one\ncategory is present, the feature will be dropped entirely.\n- 'if_binary' : drop the first category in each feature with two\ncategories. Features with 1 or more than 2 categories are\nleft intact.\n- array : ``drop[i]`` is the category in feature ``X[:, i]`` that\nshould be dropped.\n", + "name": "drop" + }, + { + "default": true, + "description": "Will return sparse matrix if set True else will return an array.\n", + "name": "sparse", + "type": "boolean" + }, + { + "default": "np.float", + "description": "Desired dtype of output.\n", + "name": "dtype" + }, + { + "default": "error", + "description": "Whether to raise an error or ignore if an unknown categorical feature\nis present during transform (default is to raise). When this parameter\nis set to 'ignore' and an unknown category is encountered during\ntransform, the resulting one-hot encoded columns for this feature\nwill be all zeros. In the inverse transform, an unknown category\nwill be denoted as None.\n", + "name": "handle_unknown" + } + ], + "description": "\nEncode categorical features as a one-hot numeric array.\n\nThe input to this transformer should be an array-like of integers or\nstrings, denoting the values taken on by categorical (discrete) features.\nThe features are encoded using a one-hot (aka 'one-of-K' or 'dummy')\nencoding scheme. This creates a binary column for each category and\nreturns a sparse matrix or dense array (depending on the ``sparse``\nparameter)\n\nBy default, the encoder derives the categories based on the unique values\nin each feature. Alternatively, you can also specify the `categories`\nmanually.\n\nThis encoding is needed for feeding categorical data to many scikit-learn\nestimators, notably linear models and SVMs with the standard kernels.\n\nNote: a one-hot encoding of y labels should use a LabelBinarizer\ninstead.\n\nRead more in the :ref:`User Guide `.\n\n.. 
versionchanged:: 0.20\n" + } + }, + { + "name": "sklearn.feature_selection._univariate_selection.SelectKBest", + "schema": { + "attributes": [ + { + "description": "Function taking two arrays X and y, and returning a pair of arrays\n(scores, pvalues) or a single array with scores.\nDefault is f_classif (see below \"See also\"). The default function only\nworks with classification tasks.\n\n.. versionadded:: 0.18\n", + "name": "score_func" + }, + { + "default": "10", + "description": "Number of top features to select.\nThe \"all\" option bypasses selection, for use in a parameter search.\n", + "name": "k", + "option": "optional" + } + ], + "description": "Select features according to the k highest scores.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.impute._base.SimpleImputer", + "schema": { + "attributes": [ + { + "description": "The placeholder for the missing values. All occurrences of\n`missing_values` will be imputed. For pandas' dataframes with\nnullable integer dtypes with missing values, `missing_values`\nshould be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.\n", + "name": "missing_values" + }, + { + "default": "mean", + "description": "The imputation strategy.\n\n- If \"mean\", then replace missing values using the mean along\neach column. Can only be used with numeric data.\n- If \"median\", then replace missing values using the median along\neach column. Can only be used with numeric data.\n- If \"most_frequent\", then replace missing using the most frequent\nvalue along each column. Can be used with strings or numeric data.\n- If \"constant\", then replace missing values with fill_value. Can be\nused with strings or numeric data.\n\n.. 
versionadded:: 0.20\nstrategy=\"constant\" for fixed value imputation.\n", + "name": "strategy", + "type": "string" + }, + { + "default": null, + "description": "When strategy == \"constant\", fill_value is used to replace all\noccurrences of missing_values.\nIf left to the default, fill_value will be 0 when imputing numerical\ndata and \"missing_value\" for strings or object data types.\n", + "name": "fill_value" + }, + { + "default": 0, + "description": "Controls the verbosity of the imputer.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": true, + "description": "If True, a copy of X will be created. If False, imputation will\nbe done in-place whenever possible. Note that, in the following cases,\na new copy will always be made, even if `copy=False`:\n\n- If X is not an array of floating values;\n- If X is encoded as a CSR matrix;\n- If add_indicator=True.\n", + "name": "copy", + "type": "boolean" + }, + { + "default": false, + "description": "If True, a :class:`MissingIndicator` transform will stack onto output\nof the imputer's transform. This allows a predictive estimator\nto account for missingness despite imputation. If a feature has no\nmissing values at fit/train time, the feature won't appear on\nthe missing indicator even if there are missing values at\ntransform/test time.\n", + "name": "add_indicator", + "type": "boolean" + } + ], + "description": "Imputation transformer for completing missing values.\n\nRead more in the :ref:`User Guide `.\n\n.. 
versionadded:: 0.20\n`SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`\nestimator which is now removed.\n" + } + }, + { + "name": "sklearn.model_selection._search.GridSearchCV", + "schema": { + "attributes": [ + { + "description": "This is assumed to implement the scikit-learn estimator interface.\nEither estimator needs to provide a ``score`` function,\nor ``scoring`` must be passed.\n", + "name": "estimator" + }, + { + "description": "Dictionary with parameters names (`str`) as keys and lists of\nparameter settings to try as values, or a list of such\ndictionaries, in which case the grids spanned by each dictionary\nin the list are explored. This enables searching over any sequence\nof parameter settings.\n", + "name": "param_grid" + }, + { + "default": null, + "description": "A single str (see :ref:`scoring_parameter`) or a callable\n(see :ref:`scoring`) to evaluate the predictions on the test set.\n\nFor evaluating multiple metrics, either give a list of (unique) strings\nor a dict with names as keys and callables as values.\n\nNOTE that when using custom scorers, each scorer should return a single\nvalue. Metric functions returning a list/array of values can be wrapped\ninto multiple scorers that return one value each.\n\nSee :ref:`multimetric_grid_search` for an example.\n\nIf None, the estimator's score method is used.\n", + "name": "scoring" + }, + { + "default": null, + "description": "Number of jobs to run in parallel.\n``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n``-1`` means using all processors. See :term:`Glossary `\nfor more details.\n\n.. versionchanged:: v0.20\n`n_jobs` default changed from 1 to None\n", + "name": "n_jobs", + "type": "int32" + }, + { + "description": "Controls the number of jobs that get dispatched during parallel\nexecution. Reducing this number can be useful to avoid an\nexplosion of memory consumption when more jobs get dispatched\nthan CPUs can process. 
This parameter can be:\n\n- None, in which case all the jobs are immediately\ncreated and spawned. Use this for lightweight and\nfast-running jobs, to avoid delays due to on-demand\nspawning of the jobs\n\n- An int, giving the exact number of total jobs that are\nspawned\n\n- A str, giving an expression as a function of n_jobs,\nas in '2*n_jobs'\n", + "name": "pre_dispatch" + }, + { + "default": false, + "description": "If True, return the average score across folds, weighted by the number\nof samples in each test set. In this case, the data is assumed to be\nidentically distributed across the folds, and the loss minimized is\nthe total loss per sample, and not the mean loss across the folds.\n\n.. deprecated:: 0.22\nParameter ``iid`` is deprecated in 0.22 and will be removed in 0.24\n", + "name": "iid", + "type": "boolean" + }, + { + "default": null, + "description": "Determines the cross-validation splitting strategy.\nPossible inputs for cv are:\n\n- None, to use the default 5-fold cross validation,\n- integer, to specify the number of folds in a `(Stratified)KFold`,\n- :term:`CV splitter`,\n- An iterable yielding (train, test) splits as arrays of indices.\n\nFor integer/None inputs, if the estimator is a classifier and ``y`` is\neither binary or multiclass, :class:`StratifiedKFold` is used. In all\nother cases, :class:`KFold` is used.\n\nRefer :ref:`User Guide ` for the various\ncross-validation strategies that can be used here.\n\n.. 
versionchanged:: 0.22\n``cv`` default value if None changed from 3-fold to 5-fold.\n", + "name": "cv", + "type": "int32" + }, + { + "default": true, + "description": "Refit an estimator using the best found parameters on the whole\ndataset.\n\nFor multiple metric evaluation, this needs to be a `str` denoting the\nscorer that would be used to find the best parameters for refitting\nthe estimator at the end.\n\nWhere there are considerations other than maximum score in\nchoosing a best estimator, ``refit`` can be set to a function which\nreturns the selected ``best_index_`` given ``cv_results_``. In that\ncase, the ``best_estimator_`` and ``best_params_`` will be set\naccording to the returned ``best_index_`` while the ``best_score_``\nattribute will not be available.\n\nThe refitted estimator is made available at the ``best_estimator_``\nattribute and permits using ``predict`` directly on this\n``GridSearchCV`` instance.\n\nAlso for multiple metric evaluation, the attributes ``best_index_``,\n``best_score_`` and ``best_params_`` will only be available if\n``refit`` is set and all of them will be determined w.r.t this specific\nscorer.\n\nSee ``scoring`` parameter to know more about multiple metric\nevaluation.\n\n.. versionchanged:: 0.20\nSupport for callable added.\n", + "name": "refit", + "type": "boolean" + }, + { + "description": "Controls the verbosity: the higher, the more messages.\n", + "name": "verbose", + "type": "int32" + }, + { + "description": "Value to assign to the score if an error occurs in estimator fitting.\nIf set to 'raise', the error is raised. If a numeric value is given,\nFitFailedWarning is raised. 
This parameter does not affect the refit\nstep, which will always raise the error.\n", + "name": "error_score" + }, + { + "default": false, + "description": "If ``False``, the ``cv_results_`` attribute will not include training\nscores.\nComputing training scores is used to get insights on how different\nparameter settings impact the overfitting/underfitting trade-off.\nHowever computing the scores on the training set can be computationally\nexpensive and is not strictly required to select the parameters that\nyield the best generalization performance.\n\n.. versionadded:: 0.19\n\n.. versionchanged:: 0.21\nDefault value was changed from ``True`` to ``False``\n\n", + "name": "return_train_score", + "type": "boolean" + } + ], + "description": "Exhaustive search over specified parameter values for an estimator.\n\nImportant members are fit, predict.\n\nGridSearchCV implements a \"fit\" and a \"score\" method.\nIt also implements \"predict\", \"predict_proba\", \"decision_function\",\n\"transform\" and \"inverse_transform\" if they are implemented in the\nestimator used.\n\nThe parameters of the estimator used to apply these methods are optimized\nby cross-validated grid-search over a parameter grid.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.decomposition._truncated_svd.TruncatedSVD", + "schema": { + "attributes": [ + { + "default": 2, + "description": "Desired dimensionality of output data.\nMust be strictly less than the number of features.\nThe default value is useful for visualisation. For LSA, a value of\n100 is recommended.\n", + "name": "n_components", + "type": "int32" + }, + { + "default": "randomized", + "description": "SVD solver to use. Either \"arpack\" for the ARPACK wrapper in SciPy\n(scipy.sparse.linalg.svds), or \"randomized\" for the randomized\nalgorithm due to Halko (2009).\n", + "name": "algorithm", + "type": "string" + }, + { + "default": 5, + "description": "Number of iterations for randomized SVD solver. 
Not used by ARPACK. The\ndefault is larger than the default in\n:func:`~sklearn.utils.extmath.randomized_svd` to handle sparse\nmatrices that may have large slowly decaying spectrum.\n", + "name": "n_iter", + "option": "optional", + "type": "int32" + }, + { + "default": null, + "description": "Used during randomized svd. Pass an int for reproducible results across\nmultiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "type": "int32" + }, + { + "description": "Tolerance for ARPACK. 0 means machine precision. Ignored by randomized\nSVD solver.\n", + "name": "tol", + "option": "optional", + "type": "float32" + } + ], + "description": "Dimensionality reduction using truncated SVD (aka LSA).\n\nThis transformer performs linear dimensionality reduction by means of\ntruncated singular value decomposition (SVD). Contrary to PCA, this\nestimator does not center the data before computing the singular value\ndecomposition. This means it can work with sparse matrices\nefficiently.\n\nIn particular, truncated SVD works on term count/tf-idf matrices as\nreturned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In\nthat context, it is known as latent semantic analysis (LSA).\n\nThis estimator supports two algorithms: a fast randomized SVD solver, and\na \"naive\" algorithm that uses ARPACK as an eigensolver on `X * X.T` or\n`X.T * X`, whichever is more efficient.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.ensemble.forest.RandomForestClassifier", + "schema": { + "attributes": [ + { + "default": 100, + "description": "The number of trees in the forest.\n\n.. versionchanged:: 0.22\nThe default value of ``n_estimators`` changed from 10 to 100\nin 0.22.\n", + "name": "n_estimators", + "type": "int32" + }, + { + "default": "\"gini\"", + "description": "The function to measure the quality of a split. 
Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain.\nNote: this parameter is tree-specific.\n", + "name": "criterion" + }, + { + "default": null, + "description": "The maximum depth of the tree. If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples.\n", + "name": "max_depth", + "type": "int32" + }, + { + "default": "2", + "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n`ceil(min_samples_split * n_samples)` are the minimum\nnumber of samples for each split.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_split" + }, + { + "default": "1", + "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n`ceil(min_samples_leaf * n_samples)` are the minimum\nnumber of samples for each node.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_leaf" + }, + { + "default": 0.0, + "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. 
Samples have\nequal weight when sample_weight is not provided.\n", + "name": "min_weight_fraction_leaf", + "type": "float32" + }, + { + "default": "\"auto\"", + "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n`int(max_features * n_features)` features are considered at each\nsplit.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)` (same as \"auto\").\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features.\n", + "name": "max_features" + }, + { + "default": null, + "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes.\n", + "name": "max_leaf_nodes", + "type": "int32" + }, + { + "default": 0.0, + "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\nN_t / N * (impurity - N_t_R / N_t * right_impurity\n- N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19\n", + "name": "min_impurity_decrease", + "type": "float32" + }, + { + "default": null, + "description": "Threshold for early stopping in tree growth. 
A node will split\nif its impurity is above the threshold, otherwise it is a leaf.\n\n.. deprecated:: 0.19\n``min_impurity_split`` has been deprecated in favor of\n``min_impurity_decrease`` in 0.19. The default value of\n``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\nwill be removed in 0.25. Use ``min_impurity_decrease`` instead.\n\n", + "name": "min_impurity_split", + "type": "float32" + }, + { + "default": true, + "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree.\n", + "name": "bootstrap", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to use out-of-bag samples to estimate\nthe generalization accuracy.\n", + "name": "oob_score", + "type": "boolean" + }, + { + "default": null, + "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. See :term:`Glossary\n` for more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Controls both the randomness of the bootstrapping of the samples used\nwhen building trees (if ``bootstrap=True``) and the sampling of the\nfeatures to consider when looking for the best split at each node\n(if ``max_features < n_features``).\nSee :term:`Glossary ` for details.\n", + "name": "random_state" + }, + { + "default": 0, + "description": "Controls the verbosity when fitting and predicting.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": false, + "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. 
See :term:`the Glossary `.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n", + "name": "class_weight" + }, + { + "default": "0.0", + "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22\n", + "name": "ccp_alpha" + }, + { + "default": null, + "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n`max_samples` should be in the interval `(0, 1)`.\n\n.. 
versionadded:: 0.22\n", + "name": "max_samples" + } + ], + "description": "\nA random forest classifier.\n\nA random forest is a meta estimator that fits a number of decision tree\nclassifiers on various sub-samples of the dataset and uses averaging to\nimprove the predictive accuracy and control over-fitting.\nThe sub-sample size is controlled with the `max_samples` parameter if\n`bootstrap=True` (default), otherwise the whole dataset is used to build\neach tree.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.ensemble.weight_boosting.AdaBoostClassifier", + "schema": { + "attributes": [ + { + "default": null, + "description": "The base estimator from which the boosted ensemble is built.\nSupport for sample weighting is required, as well as proper\n``classes_`` and ``n_classes_`` attributes. If ``None``, then\nthe base estimator is ``DecisionTreeClassifier(max_depth=1)``.\n", + "name": "base_estimator" + }, + { + "default": 50, + "description": "The maximum number of estimators at which boosting is terminated.\nIn case of perfect fit, the learning procedure is stopped early.\n", + "name": "n_estimators", + "type": "int32" + }, + { + "default": 1.0, + "description": "Learning rate shrinks the contribution of each classifier by\n``learning_rate``. 
There is a trade-off between ``learning_rate`` and\n``n_estimators``.\n", + "name": "learning_rate", + "type": "float32" + }, + { + "default": "SAMME.R", + "description": "If 'SAMME.R' then use the SAMME.R real boosting algorithm.\n``base_estimator`` must support calculation of class probabilities.\nIf 'SAMME' then use the SAMME discrete boosting algorithm.\nThe SAMME.R algorithm typically converges faster than SAMME,\nachieving a lower test error with fewer boosting iterations.\n", + "name": "algorithm" + }, + { + "default": null, + "description": "Controls the random seed given at each `base_estimator` at each\nboosting iteration.\nThus, it is only used when `base_estimator` exposes a `random_state`.\nPass an int for reproducible output across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state" + } + ], + "description": "An AdaBoost classifier.\n\nAn AdaBoost [1] classifier is a meta-estimator that begins by fitting a\nclassifier on the original dataset and then fits additional copies of the\nclassifier on the same dataset but where the weights of incorrectly\nclassified instances are adjusted such that subsequent classifiers focus\nmore on difficult cases.\n\nThis class implements the algorithm known as AdaBoost-SAMME [2].\n\nRead more in the :ref:`User Guide `.\n\n.. versionadded:: 0.14\n" + } + }, + { + "name": "sklearn.ensemble.forest.ExtraTreesClassifier", + "schema": { + "attributes": [ + { + "default": 100, + "description": "The number of trees in the forest.\n\n.. versionchanged:: 0.22\nThe default value of ``n_estimators`` changed from 10 to 100\nin 0.22.\n", + "name": "n_estimators", + "type": "int32" + }, + { + "default": "\"gini\"", + "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain.\n", + "name": "criterion" + }, + { + "default": null, + "description": "The maximum depth of the tree. 
If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples.\n", + "name": "max_depth", + "type": "int32" + }, + { + "default": "2", + "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n`ceil(min_samples_split * n_samples)` are the minimum\nnumber of samples for each split.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_split" + }, + { + "default": "1", + "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n`ceil(min_samples_leaf * n_samples)` are the minimum\nnumber of samples for each node.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_leaf" + }, + { + "default": 0.0, + "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. 
Samples have\nequal weight when sample_weight is not provided.\n", + "name": "min_weight_fraction_leaf", + "type": "float32" + }, + { + "default": "\"auto\"", + "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n`int(max_features * n_features)` features are considered at each\nsplit.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features.\n", + "name": "max_features" + }, + { + "default": null, + "description": "Grow trees with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes.\n", + "name": "max_leaf_nodes", + "type": "int32" + }, + { + "default": 0.0, + "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\nN_t / N * (impurity - N_t_R / N_t * right_impurity\n- N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19\n", + "name": "min_impurity_decrease", + "type": "float32" + }, + { + "default": null, + "description": "Threshold for early stopping in tree growth. 
A node will split\nif its impurity is above the threshold, otherwise it is a leaf.\n\n.. deprecated:: 0.19\n``min_impurity_split`` has been deprecated in favor of\n``min_impurity_decrease`` in 0.19. The default value of\n``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\nwill be removed in 0.25. Use ``min_impurity_decrease`` instead.\n", + "name": "min_impurity_split", + "type": "float32" + }, + { + "default": false, + "description": "Whether bootstrap samples are used when building trees. If False, the\nwhole dataset is used to build each tree.\n", + "name": "bootstrap", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to use out-of-bag samples to estimate\nthe generalization accuracy.\n", + "name": "oob_score", + "type": "boolean" + }, + { + "default": null, + "description": "The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,\n:meth:`decision_path` and :meth:`apply` are all parallelized over the\ntrees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`\ncontext. ``-1`` means using all processors. See :term:`Glossary\n` for more details.\n", + "name": "n_jobs", + "type": "int32" + }, + { + "default": null, + "description": "Controls 3 sources of randomness:\n\n- the bootstrapping of the samples used when building trees\n(if ``bootstrap=True``)\n- the sampling of the features to consider when looking for the best\nsplit at each node (if ``max_features < n_features``)\n- the draw of the splits for each of the `max_features`\n\nSee :term:`Glossary ` for details.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": 0, + "description": "Controls the verbosity when fitting and predicting.\n", + "name": "verbose", + "type": "int32" + }, + { + "default": false, + "description": "When set to ``True``, reuse the solution of the previous call to fit\nand add more estimators to the ensemble, otherwise, just fit a whole\nnew forest. 
See :term:`the Glossary `.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf not given, all classes are supposed to have weight one. For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nThe \"balanced_subsample\" mode is the same as \"balanced\" except that\nweights are computed based on the bootstrap sample for every tree\ngrown.\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n", + "name": "class_weight" + }, + { + "default": "0.0", + "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. versionadded:: 0.22\n", + "name": "ccp_alpha" + }, + { + "default": null, + "description": "If bootstrap is True, the number of samples to draw from X\nto train each base estimator.\n\n- If None (default), then draw `X.shape[0]` samples.\n- If int, then draw `max_samples` samples.\n- If float, then draw `max_samples * X.shape[0]` samples. Thus,\n`max_samples` should be in the interval `(0, 1)`.\n\n.. 
versionadded:: 0.22\n", + "name": "max_samples" + } + ], + "description": "\nAn extra-trees classifier.\n\nThis class implements a meta estimator that fits a number of\nrandomized decision trees (a.k.a. extra-trees) on various sub-samples\nof the dataset and uses averaging to improve the predictive accuracy\nand control over-fitting.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.neural_network.multilayer_perceptron.MLPRegressor", + "schema": { + "attributes": [ + { + "default": "(100,)", + "description": "The ith element represents the number of neurons in the ith\nhidden layer.\n", + "name": "hidden_layer_sizes" + }, + { + "default": "relu", + "description": "Activation function for the hidden layer.\n\n- 'identity', no-op activation, useful to implement linear bottleneck,\nreturns f(x) = x\n\n- 'logistic', the logistic sigmoid function,\nreturns f(x) = 1 / (1 + exp(-x)).\n\n- 'tanh', the hyperbolic tan function,\nreturns f(x) = tanh(x).\n\n- 'relu', the rectified linear unit function,\nreturns f(x) = max(0, x)\n", + "name": "activation" + }, + { + "default": "adam", + "description": "The solver for weight optimization.\n\n- 'lbfgs' is an optimizer in the family of quasi-Newton methods.\n\n- 'sgd' refers to stochastic gradient descent.\n\n- 'adam' refers to a stochastic gradient-based optimizer proposed by\nKingma, Diederik, and Jimmy Ba\n\nNote: The default solver 'adam' works pretty well on relatively\nlarge datasets (with thousands of training samples or more) in terms of\nboth training time and validation score.\nFor small datasets, however, 'lbfgs' can converge faster and perform\nbetter.\n", + "name": "solver" + }, + { + "default": 0.0001, + "description": "L2 penalty (regularization term) parameter.\n", + "name": "alpha", + "type": "float32" + }, + { + "default": "auto", + "description": "Size of minibatches for stochastic optimizers.\nIf the solver is 'lbfgs', the classifier will not use minibatch.\nWhen set to \"auto\", 
`batch_size=min(200, n_samples)`\n", + "name": "batch_size", + "type": "int32" + }, + { + "default": "constant", + "description": "Learning rate schedule for weight updates.\n\n- 'constant' is a constant learning rate given by\n'learning_rate_init'.\n\n- 'invscaling' gradually decreases the learning rate ``learning_rate_``\nat each time step 't' using an inverse scaling exponent of 'power_t'.\neffective_learning_rate = learning_rate_init / pow(t, power_t)\n\n- 'adaptive' keeps the learning rate constant to\n'learning_rate_init' as long as training loss keeps decreasing.\nEach time two consecutive epochs fail to decrease training loss by at\nleast tol, or fail to increase validation score by at least tol if\n'early_stopping' is on, the current learning rate is divided by 5.\n\nOnly used when solver='sgd'.\n", + "name": "learning_rate" + }, + { + "default": "0.001", + "description": "The initial learning rate used. It controls the step-size\nin updating the weights. Only used when solver='sgd' or 'adam'.\n", + "name": "learning_rate_init" + }, + { + "default": "0.5", + "description": "The exponent for inverse scaling learning rate.\nIt is used in updating effective learning rate when the learning_rate\nis set to 'invscaling'. Only used when solver='sgd'.\n", + "name": "power_t" + }, + { + "default": 200, + "description": "Maximum number of iterations. The solver iterates until convergence\n(determined by 'tol') or this number of iterations. For stochastic\nsolvers ('sgd', 'adam'), note that this determines the number of epochs\n(how many times each data point will be used), not the number of\ngradient steps.\n", + "name": "max_iter", + "type": "int32" + }, + { + "default": true, + "description": "Whether to shuffle samples in each iteration. 
Only used when\nsolver='sgd' or 'adam'.\n", + "name": "shuffle", + "type": "boolean" + }, + { + "default": null, + "description": "Determines random number generation for weights and bias\ninitialization, train-test split if early stopping is used, and batch\nsampling when solver='sgd' or 'adam'.\nPass an int for reproducible results across multiple function calls.\nSee :term:`Glossary `.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": 0.0001, + "description": "Tolerance for the optimization. When the loss or score is not improving\nby at least ``tol`` for ``n_iter_no_change`` consecutive iterations,\nunless ``learning_rate`` is set to 'adaptive', convergence is\nconsidered to be reached and training stops.\n", + "name": "tol", + "type": "float32" + }, + { + "default": false, + "description": "Whether to print progress messages to stdout.\n", + "name": "verbose", + "type": "boolean" + }, + { + "default": false, + "description": "When set to True, reuse the solution of the previous\ncall to fit as initialization, otherwise, just erase the\nprevious solution. See :term:`the Glossary `.\n", + "name": "warm_start", + "type": "boolean" + }, + { + "default": 0.9, + "description": "Momentum for gradient descent update. Should be between 0 and 1. Only\nused when solver='sgd'.\n", + "name": "momentum", + "type": "float32" + }, + { + "default": true, + "description": "Whether to use Nesterov's momentum. Only used when solver='sgd' and\nmomentum > 0.\n", + "name": "nesterovs_momentum", + "type": "boolean" + }, + { + "default": false, + "description": "Whether to use early stopping to terminate training when validation\nscore is not improving. 
If set to true, it will automatically set\naside 10% of training data as validation and terminate training when\nvalidation score is not improving by at least ``tol`` for\n``n_iter_no_change`` consecutive epochs.\nOnly effective when solver='sgd' or 'adam'\n", + "name": "early_stopping", + "type": "boolean" + }, + { + "default": 0.1, + "description": "The proportion of training data to set aside as validation set for\nearly stopping. Must be between 0 and 1.\nOnly used if early_stopping is True\n", + "name": "validation_fraction", + "type": "float32" + }, + { + "default": 0.9, + "description": "Exponential decay rate for estimates of first moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'\n", + "name": "beta_1", + "type": "float32" + }, + { + "default": 0.999, + "description": "Exponential decay rate for estimates of second moment vector in adam,\nshould be in [0, 1). Only used when solver='adam'\n", + "name": "beta_2", + "type": "float32" + }, + { + "default": 1e-08, + "description": "Value for numerical stability in adam. Only used when solver='adam'\n", + "name": "epsilon", + "type": "float32" + }, + { + "default": 10, + "description": "Maximum number of epochs to not meet ``tol`` improvement.\nOnly effective when solver='sgd' or 'adam'\n\n.. versionadded:: 0.20\n", + "name": "n_iter_no_change", + "type": "int32" + }, + { + "default": 15000, + "description": "Only used when solver='lbfgs'. Maximum number of function calls.\nThe solver iterates until convergence (determined by 'tol'), number\nof iterations reaches max_iter, or this number of function calls.\nNote that number of function calls will be greater than or equal to\nthe number of iterations for the MLPRegressor.\n\n.. versionadded:: 0.22\n", + "name": "max_fun", + "type": "int32" + } + ], + "description": "Multi-layer Perceptron regressor.\n\nThis model optimizes the squared-loss using LBFGS or stochastic gradient\ndescent.\n\n.. 
versionadded:: 0.18\n" + } + }, + { + "name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", + "schema": { + "attributes": [ + { + "default": "svd", + "description": "Solver to use, possible values:\n- 'svd': Singular value decomposition (default).\nDoes not compute the covariance matrix, therefore this solver is\nrecommended for data with a large number of features.\n- 'lsqr': Least squares solution, can be combined with shrinkage.\n- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.\n", + "name": "solver" + }, + { + "description": "Shrinkage parameter, possible values:\n- None: no shrinkage (default).\n- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.\n- float between 0 and 1: fixed shrinkage parameter.\n\nNote that shrinkage works only with 'lsqr' and 'eigen' solvers.\n", + "name": "shrinkage" + }, + { + "default": null, + "description": "The class prior probabilities. By default, the class proportions are\ninferred from the training data.\n", + "name": "priors" + }, + { + "default": null, + "description": "Number of components (<= min(n_classes - 1, n_features)) for\ndimensionality reduction. If None, will be set to\nmin(n_classes - 1, n_features). This parameter only affects the\n`transform` method.\n", + "name": "n_components", + "type": "int32" + }, + { + "default": false, + "description": "If True, explicitely compute the weighted within-class covariance\nmatrix when solver is 'svd'. The matrix is always computed\nand stored for the other solvers.\n\n.. versionadded:: 0.17\n", + "name": "store_covariance", + "type": "boolean" + }, + { + "default": 0.0001, + "description": "Absolute threshold for a singular value of X to be considered\nsignificant, used to estimate the rank of X. Dimensions whose\nsingular values are non-significant are discarded. Only used if\nsolver is 'svd'.\n\n.. 
versionadded:: 0.17\n", + "name": "tol", + "type": "float32" + } + ], + "description": "Linear Discriminant Analysis\n\nA classifier with a linear decision boundary, generated by fitting class\nconditional densities to the data and using Bayes' rule.\n\nThe model fits a Gaussian density to each class, assuming that all classes\nshare the same covariance matrix.\n\nThe fitted model can also be used to reduce the dimensionality of the input\nby projecting it to the most discriminative directions, using the\n`transform` method.\n\n.. versionadded:: 0.17\n*LinearDiscriminantAnalysis*.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.preprocessing._data.StandardScaler", + "schema": { + "attributes": [ + { + "default": true, + "description": "If False, try to avoid a copy and do inplace scaling instead.\nThis is not guaranteed to always work inplace; e.g. if the data is\nnot a NumPy array or scipy.sparse CSR matrix, a copy may still be\nreturned.\n", + "name": "copy", + "option": "optional", + "type": "boolean" + }, + { + "default": true, + "description": "If True, center the data before scaling.\nThis does not work (and will raise an exception) when attempted on\nsparse matrices, because centering them entails building a dense\nmatrix which in common use cases is likely to be too large to fit in\nmemory.\n", + "name": "with_mean", + "type": "boolean" + }, + { + "default": true, + "description": "If True, scale the data to unit variance (or equivalently,\nunit standard deviation).\n", + "name": "with_std", + "type": "boolean" + } + ], + "description": "Standardize features by removing the mean and scaling to unit variance\n\nThe standard score of a sample `x` is calculated as:\n\nz = (x - u) / s\n\nwhere `u` is the mean of the training samples or zero if `with_mean=False`,\nand `s` is the standard deviation of the training samples or one if\n`with_std=False`.\n\nCentering and scaling happen independently on each feature by computing\nthe 
relevant statistics on the samples in the training set. Mean and\nstandard deviation are then stored to be used on later data using\n:meth:`transform`.\n\nStandardization of a dataset is a common requirement for many\nmachine learning estimators: they might behave badly if the\nindividual features do not more or less look like standard normally\ndistributed data (e.g. Gaussian with 0 mean and unit variance).\n\nFor instance many elements used in the objective function of\na learning algorithm (such as the RBF kernel of Support Vector\nMachines or the L1 and L2 regularizers of linear models) assume that\nall features are centered around 0 and have variance in the same\norder. If a feature has a variance that is orders of magnitude larger\nthat others, it might dominate the objective function and make the\nestimator unable to learn from other features correctly as expected.\n\nThis scaler can also be applied to sparse CSR or CSC matrices by passing\n`with_mean=False` to avoid breaking the sparsity structure of the data.\n\nRead more in the :ref:`User Guide `.\n" + } + }, + { + "name": "sklearn.tree.tree.DecisionTreeClassifier", + "schema": { + "attributes": [ + { + "default": "\"gini\"", + "description": "The function to measure the quality of a split. Supported criteria are\n\"gini\" for the Gini impurity and \"entropy\" for the information gain.\n", + "name": "criterion" + }, + { + "default": "\"best\"", + "description": "The strategy used to choose the split at each node. Supported\nstrategies are \"best\" to choose the best split and \"random\" to choose\nthe best random split.\n", + "name": "splitter" + }, + { + "default": null, + "description": "The maximum depth of the tree. 
If None, then nodes are expanded until\nall leaves are pure or until all leaves contain less than\nmin_samples_split samples.\n", + "name": "max_depth", + "type": "int32" + }, + { + "default": "2", + "description": "The minimum number of samples required to split an internal node:\n\n- If int, then consider `min_samples_split` as the minimum number.\n- If float, then `min_samples_split` is a fraction and\n`ceil(min_samples_split * n_samples)` are the minimum\nnumber of samples for each split.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_split" + }, + { + "default": "1", + "description": "The minimum number of samples required to be at a leaf node.\nA split point at any depth will only be considered if it leaves at\nleast ``min_samples_leaf`` training samples in each of the left and\nright branches. This may have the effect of smoothing the model,\nespecially in regression.\n\n- If int, then consider `min_samples_leaf` as the minimum number.\n- If float, then `min_samples_leaf` is a fraction and\n`ceil(min_samples_leaf * n_samples)` are the minimum\nnumber of samples for each node.\n\n.. versionchanged:: 0.18\nAdded float values for fractions.\n", + "name": "min_samples_leaf" + }, + { + "default": 0.0, + "description": "The minimum weighted fraction of the sum total of weights (of all\nthe input samples) required to be at a leaf node. 
Samples have\nequal weight when sample_weight is not provided.\n", + "name": "min_weight_fraction_leaf", + "type": "float32" + }, + { + "default": null, + "description": "The number of features to consider when looking for the best split:\n\n- If int, then consider `max_features` features at each split.\n- If float, then `max_features` is a fraction and\n`int(max_features * n_features)` features are considered at each\nsplit.\n- If \"auto\", then `max_features=sqrt(n_features)`.\n- If \"sqrt\", then `max_features=sqrt(n_features)`.\n- If \"log2\", then `max_features=log2(n_features)`.\n- If None, then `max_features=n_features`.\n\nNote: the search for a split does not stop until at least one\nvalid partition of the node samples is found, even if it requires to\neffectively inspect more than ``max_features`` features.\n", + "name": "max_features", + "type": "int32" + }, + { + "default": null, + "description": "Controls the randomness of the estimator. The features are always\nrandomly permuted at each split, even if ``splitter`` is set to\n``\"best\"``. When ``max_features < n_features``, the algorithm will\nselect ``max_features`` at random at each split before finding the best\nsplit among them. But the best found split may vary across different\nruns, even if ``max_features=n_features``. That is the case, if the\nimprovement of the criterion is identical for several splits and one\nsplit has to be selected at random. 
To obtain a deterministic behaviour\nduring fitting, ``random_state`` has to be fixed to an integer.\nSee :term:`Glossary ` for details.\n", + "name": "random_state", + "type": "int32" + }, + { + "default": null, + "description": "Grow a tree with ``max_leaf_nodes`` in best-first fashion.\nBest nodes are defined as relative reduction in impurity.\nIf None then unlimited number of leaf nodes.\n", + "name": "max_leaf_nodes", + "type": "int32" + }, + { + "default": 0.0, + "description": "A node will be split if this split induces a decrease of the impurity\ngreater than or equal to this value.\n\nThe weighted impurity decrease equation is the following::\n\nN_t / N * (impurity - N_t_R / N_t * right_impurity\n- N_t_L / N_t * left_impurity)\n\nwhere ``N`` is the total number of samples, ``N_t`` is the number of\nsamples at the current node, ``N_t_L`` is the number of samples in the\nleft child, and ``N_t_R`` is the number of samples in the right child.\n\n``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,\nif ``sample_weight`` is passed.\n\n.. versionadded:: 0.19\n", + "name": "min_impurity_decrease", + "type": "float32" + }, + { + "default": 0.0, + "description": "Threshold for early stopping in tree growth. A node will split\nif its impurity is above the threshold, otherwise it is a leaf.\n\n.. deprecated:: 0.19\n``min_impurity_split`` has been deprecated in favor of\n``min_impurity_decrease`` in 0.19. The default value of\n``min_impurity_split`` has changed from 1e-7 to 0 in 0.23 and it\nwill be removed in 0.25. Use ``min_impurity_decrease`` instead.\n", + "name": "min_impurity_split", + "type": "float32" + }, + { + "default": null, + "description": "Weights associated with classes in the form ``{class_label: weight}``.\nIf None, all classes are supposed to have weight one. 
For\nmulti-output problems, a list of dicts can be provided in the same\norder as the columns of y.\n\nNote that for multioutput (including multilabel) weights should be\ndefined for each class of every column in its own dict. For example,\nfor four-class multilabel classification weights should be\n[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of\n[{1:1}, {2:5}, {3:1}, {4:1}].\n\nThe \"balanced\" mode uses the values of y to automatically adjust\nweights inversely proportional to class frequencies in the input data\nas ``n_samples / (n_classes * np.bincount(y))``\n\nFor multi-output, the weights of each column of y will be multiplied.\n\nNote that these weights will be multiplied with sample_weight (passed\nthrough the fit method) if sample_weight is specified.\n", + "name": "class_weight" + }, + { + "default": "deprecated", + "description": "This parameter is deprecated and will be removed in v0.24.\n\n.. deprecated:: 0.22\n", + "name": "presort" + }, + { + "default": "0.0", + "description": "Complexity parameter used for Minimal Cost-Complexity Pruning. The\nsubtree with the largest cost complexity that is smaller than\n``ccp_alpha`` will be chosen. By default, no pruning is performed. See\n:ref:`minimal_cost_complexity_pruning` for details.\n\n.. 
versionadded:: 0.22\n", + "name": "ccp_alpha" + } + ], + "description": "A decision tree classifier.\n\nRead more in the :ref:`User Guide `.\n" + } + } +] diff --git a/frontend/packages/core/public/netron/sklearn.js b/frontend/packages/core/public/netron/sklearn.js new file mode 100644 index 00000000..36a3da62 --- /dev/null +++ b/frontend/packages/core/public/netron/sklearn.js @@ -0,0 +1,1134 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var sklearn = sklearn || {}; +var long = long || { Long: require('long') }; + +sklearn.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (['pkl', 'joblib', 'model', 'meta', 'pb'].indexOf(extension) !== -1) { + const buffer = context.buffer; + if (buffer) { + // Reject PyTorch models with .pkl file extension. + const torch = [ 0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19 ]; + if (buffer.length > 14 && buffer[0] == 0x80 && torch.every((v, i) => v == buffer[i + 2])) { + return false; + } + if (buffer.length > 1 && buffer[buffer.length - 1] === 0x2E) { + return true; + } + if (buffer.length > 2 && buffer[0] === 0x80 && buffer[1] < 5) { + return true; + } + } + } + return false; + } + + open(context, host) { + return host.require('./pickle').then((pickle) => { + const identifier = context.identifier; + return sklearn.Metadata.open(host).then((metadata) => { + try { + const container = new sklearn.Container(context.buffer, pickle, (error, fatal) => { + const message = error && error.message ? 
sklearn.Model = class {

    // Wraps a deserialized pickle root object (and/or a weights map) as a
    // single-graph model. The display format label is derived from the
    // Python module of the root object.
    constructor(metadata, obj, weights) {
        const module = (obj && obj.__module__) ? obj.__module__ : '';
        const version = (obj && obj._sklearn_version) ? ' v' + obj._sklearn_version.toString() : '';
        if (module.startsWith('sklearn.')) {
            this._format = 'scikit-learn' + version;
        }
        else if (module.startsWith('xgboost.')) {
            this._format = 'XGBoost' + version;
        }
        else if (module.startsWith('nolearn.lasagne.')) {
            this._format = 'Lasagne';
        }
        else if (module.startsWith('gensim.')) {
            this._format = 'gensim';
        }
        else {
            // Fallback for arbitrary pickled objects.
            this._format = 'Pickle';
        }
        this._graphs = [ new sklearn.Graph(metadata, obj, weights) ];
    }

    get format() {
        return this._format;
    }

    get graphs() {
        return this._graphs;
    }
};
parts.pop() : '?'; + const id = parts.join('_'); + let group = group_map.get(id); + if (!group) { + group = { id: id, arrays: [] }; + groups.push(group); + group_map.set(id, group); + } + group.arrays.push({ + key: key, + name: name, + value: value + }); + } + this._nodes = this._nodes.concat(groups.map((group) => { + const inputs = group.arrays.map((array) => { + return new sklearn.Parameter(array.name, [ + new sklearn.Argument(array.key, null, new sklearn.Tensor(array.key, array.value)) + ]); + }); + return new sklearn.Node(this._metadata, '', group.id, { __module__: 'sklearn._', __name__: 'Weights' }, inputs, []); + })); + } + } + + _process(group, name, obj, inputs) { + switch ([ obj.__module__, obj.__name__].join('.')) { + case 'sklearn.pipeline.Pipeline': { + this._groups = true; + name = name || 'pipeline'; + const childGroup = this._concat(group, name); + for (const step of obj.steps) { + inputs = this._process(childGroup, step[0], step[1], inputs); + } + return inputs; + } + case 'sklearn.pipeline.FeatureUnion': { + this._groups = true; + let outputs = []; + name = name || 'union'; + const output = this._concat(group, name); + const subgroup = this._concat(group, name); + this._add(subgroup, output, obj, inputs, [ output ]); + for (const transformer of obj.transformer_list){ + outputs = outputs.concat(this._process(subgroup, transformer[0], transformer[1], [ output ])); + } + return outputs; + } + case 'sklearn.compose._column_transformer.ColumnTransformer': { + this._groups = true; + name = name || 'transformer'; + const output = this._concat(group, name); + const subgroup = this._concat(group, name); + let outputs = []; + this._add(subgroup, output, obj, inputs, [ output ]); + for (const transformer of obj.transformers){ + outputs = outputs.concat(this._process(subgroup, transformer[0], transformer[1], [ output ])); + } + return outputs; + } + default: { + const output = this._concat(group, name); + this._add(group, output, obj, inputs, [ output ]); + 
sklearn.Parameter = class {

    // Named grouping of arguments attached to a node input or output slot.
    constructor(name, args) {
        this._name = name;
        this._arguments = args;
    }

    get name() {
        return this._name;
    }

    get visible() {
        // Parameters are always shown.
        return true;
    }

    get arguments() {
        return this._arguments;
    }
};

sklearn.Argument = class {

    // A single connection endpoint. When an initializer tensor is present,
    // its type takes precedence over the declared type.
    constructor(name, type, initializer) {
        if (typeof name !== 'string') {
            throw new sklearn.Error("Invalid argument identifier '" + JSON.stringify(name) + "'.");
        }
        this._name = name;
        this._type = type || null;
        this._initializer = initializer || null;
    }

    get name() {
        return this._name;
    }

    get type() {
        return this._initializer ? this._initializer.type : this._type;
    }

    get initializer() {
        return this._initializer;
    }
};
''; + this._type = (obj.__module__ && obj.__name__) ? (obj.__module__ + '.' + obj.__name__) : (obj.__name__ ? obj.__name__ : 'Object'); + this._inputs = inputs; + this._outputs = outputs; + this._attributes = []; + this._initializers = []; + for (const name of Object.keys(obj)) { + if (!name.startsWith('_')) { + const value = obj[name]; + if (value && !Array.isArray(value) && value === Object(value) && sklearn.Utility.isTensor(value)) { + this._initializers.push(new sklearn.Tensor(name, value)); + } + else { + const schema = metadata.attribute(this._type, name); + this._attributes.push(new sklearn.Attribute(schema, name, value)); + } + } + } + } + + get type() { + return this._type; // .split('.').pop(); + } + + get name() { + return this._name; + } + + get group() { + return this._group ? this._group : null; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get attributes() { + return this._attributes; + } +}; + +sklearn.Attribute = class { + + constructor(schema, name, value) { + this._name = name; + this._value = value; + if (schema) { + if (Object.prototype.hasOwnProperty.call(schema, 'option') && schema.option == 'optional' && this._value == null) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (sklearn.Attribute._isEquivalent(schema.default, this._value)) { + this._visible = false; + } + } + } + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } + + static _isEquivalent(a, b) { + if (a === b) { + return a !== 0 || 1 / a === 1 / b; + } + if (a == null || b == null) { + return false; + } + if (a !== a) { + return b !== b; + } + const type = typeof a; + if (type !== 'function' && type !== 'object' && typeof b != 'object') { + return false; + } + const className = toString.call(a); + if (className !== toString.call(b)) { + return false; + } + switch (className) { + case '[object RegExp]': + case '[object String]': + return '' + a === '' + b; + case '[object Number]': { + if (+a !== +a) { + return +b !== +b; + } + return +a === 0 ? 1 / +a === 1 / b : +a === +b; + } + case '[object Date]': + case '[object Boolean]': { + return +a === +b; + } + case '[object Array]': { + let length = a.length; + if (length !== b.length) { + return false; + } + while (length--) { + if (!sklearn.Attribute._isEquivalent(a[length], b[length])) { + return false; + } + } + return true; + } + } + + const keys = Object.keys(a); + let size = keys.length; + if (Object.keys(b).length != size) { + return false; + } + while (size--) { + let key = keys[size]; + if (!(Object.prototype.hasOwnProperty.call(b, key) && sklearn.Attribute._isEquivalent(a[key], b[key]))) { + return false; + } + } + return true; + } +}; + +sklearn.Tensor = class { + + constructor(name, value) { + this._name = name; + if (sklearn.Utility.isTensor(value)) { + this._kind = 'Array'; + this._type = new sklearn.TensorType(value.dtype.name, new sklearn.TensorShape(value.shape)); + this._data = value.data; + } + else { + const type = [ value.__module__, value.__name__ ].join('.'); + throw new sklearn.Error("Unknown tensor type '" + type + "'."); + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get kind() { + return this._kind; + } + + get state() { + return this._context().state || null; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = 
Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + switch (this._type.dataType) { + case 'int64': + case 'uint64': + return sklearn.Tensor._stringify(value, '', ' '); + } + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + + if (!this._type) { + context.state = 'Tensor has no data type.'; + return context; + } + if (!this._data) { + context.state = 'Tensor is data is empty.'; + return context; + } + + context.dataType = this._type.dataType; + context.dimensions = this._type.shape.dimensions; + + switch (context.dataType) { + case 'float32': + case 'float64': + case 'int32': + case 'uint32': + case 'int64': + case 'uint64': + context.rawData = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + break; + default: + context.state = "Tensor data type '" + context.dataType + "' is not implemented."; + return context; + } + + return context; + } + + _decode(context, dimension) { + const results = []; + const size = context.dimensions[dimension]; + if (dimension == context.dimensions.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (context.dataType) { + case 'float32': + results.push(context.rawData.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'float64': + results.push(context.rawData.getFloat64(context.index, true)); + context.index += 8; + context.count++; + break; + case 'int32': + results.push(context.rawData.getInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'uint32': + results.push(context.rawData.getUint32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'int64': 
sklearn.TensorType = class {

    // Element data type plus shape, rendered e.g. as 'float32[10,2]'.
    constructor(dataType, shape) {
        this._dataType = dataType;
        this._shape = shape;
    }

    get dataType() {
        return this._dataType;
    }

    get shape() {
        return this._shape;
    }

    toString() {
        return this._dataType + this._shape.toString();
    }
};
sklearn.Metadata = class {

    // Loads and caches 'sklearn-metadata.json'. The parsed metadata is a
    // process-wide singleton; a failed request yields an empty instance so
    // that lookups degrade gracefully instead of rejecting.
    static open(host) {
        if (sklearn.Metadata._metadata) {
            return Promise.resolve(sklearn.Metadata._metadata);
        }
        const done = (data) => {
            sklearn.Metadata._metadata = new sklearn.Metadata(data);
            return sklearn.Metadata._metadata;
        };
        return host.request(null, 'sklearn-metadata.json', 'utf-8').then(done).catch(() => done(null));
    }

    constructor(data) {
        this._map = new Map();
        this._attributeCache = new Map();
        if (!data) {
            return;
        }
        const entries = JSON.parse(data);
        if (entries) {
            for (const entry of entries) {
                if (entry.name && entry.schema) {
                    // The schema carries its own name so consumers can
                    // round-trip it without the outer wrapper.
                    entry.schema.name = entry.name;
                    this._map.set(entry.name, entry.schema);
                }
            }
        }
    }

    // Schema for a fully-qualified type name, or undefined if unknown.
    type(name) {
        return this._map.get(name);
    }

    // Attribute schema lookup with per-type memoization: the first query for
    // a type caches all of its attributes at once; misses are cached as null.
    attribute(type, name) {
        const key = type + ':' + name;
        if (!this._attributeCache.has(key)) {
            const schema = this.type(type);
            if (schema && schema.attributes && schema.attributes.length > 0) {
                for (const attribute of schema.attributes) {
                    this._attributeCache.set(type + ':' + attribute.name, attribute);
                }
            }
            if (!this._attributeCache.has(key)) {
                this._attributeCache.set(key, null);
            }
        }
        return this._attributeCache.get(key);
    }
};
'u4': this.name = 'uint32'; this.itemsize = 4; break; + case 'u8': this.name = 'uint64'; this.itemsize = 8; break; + case 'f2': this.name = 'float16'; this.itemsize = 2; break; + case 'f4': this.name = 'float32'; this.itemsize = 4; break; + case 'f8': this.name = 'float64'; this.itemsize = 8; break; + case 'b1': this.name = 'int8'; this.itemsize = 1; break; + default: + if (obj.startsWith('V')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'void' + (this.itemsize * 8).toString(); + } + else if (obj.startsWith('O')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'object'; + } + else if (obj.startsWith('S')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'string'; + } + else if (obj.startsWith('U')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'string'; + } + else if (obj.startsWith('M')) { + this.itemsize = Number(obj.substring(1)); + this.name = 'datetime'; + } + else { + throw new sklearn.Error("Unknown dtype '" + obj.toString() + "'."); + } + break; + } + this.align = align; + this.copy = copy; + this.__setstate__ = function(state) { + switch (state.length) { + case 8: + this.version = state[0]; + this.byteorder = state[1]; + this.subarray = state[2]; + this.names = state[3]; + this.fields = state[4]; + this.elsize = state[5]; + this.alignment = state[6]; + this.int_dtypeflags = state[7]; + break; + default: + throw new sklearn.Error("Unknown numpy.dtype setstate length '" + state.length.toString() + "'."); + } + }; + }; + constructorTable['numpy.core.multiarray._reconstruct'] = function(subtype, shape, dtype) { + this.subtype = subtype; + this.shape = shape; + this.dtype = dtype; + this.__setstate__ = function(state) { + this.version = state[0]; + this.shape = state[1]; + this.typecode = state[2]; + this.is_f_order = state[3]; + this.rawdata = state[4]; + }; + this.__read__ = function(unpickler) { + const array = {}; + sklearn.Utility.applyType(array, this.subtype); + array.dtype = this.typecode; + 
array.shape = this.shape; + const dims = array.shape && array.shape.length > 0 ? array.shape.reduce((a, b) => a * b) : 1; + const size = array.dtype.itemsize * dims; + if (typeof this.rawdata == 'string') { + array.data = unpickler.unescape(this.rawdata, size); + if (array.data.length != size) { + throw new sklearn.Error('Invalid string array data size.'); + } + } + else { + array.data = this.rawdata; + } + return array; + }; + }; + constructorTable['joblib.numpy_pickle.NumpyArrayWrapper'] = function(/* subtype, shape, dtype */) { + this.__setstate__ = function(state) { + this.subclass = state.subclass; + this.dtype = state.dtype; + this.shape = state.shape; + this.order = state.order; + this.allow_mmap = state.allow_mmap; + }; + this.__read__ = function(unpickler) { + if (this.dtype.name == 'object') { + return unpickler.load(function_call, null); + } + else { + const size = this.dtype.itemsize * this.shape.reduce((a, b) => a * b); + this.data = unpickler.read(size); + } + const obj = { + dtype: this.dtype, + shape: this.shape, + data: this.data, + }; + sklearn.Utility.applyType(obj, this.subclass); + return obj; + }; + }; + + constructorTable['gensim.models.doc2vec.Doctag'] = function() {}; + constructorTable['gensim.models.doc2vec.Doc2Vec'] = function() {}; + constructorTable['gensim.models.doc2vec.Doc2VecTrainables'] = function() {}; + constructorTable['gensim.models.doc2vec.Doc2VecVocab'] = function() {}; + constructorTable['gensim.models.keyedvectors.Doc2VecKeyedVectors'] = function() {}; + constructorTable['gensim.models.keyedvectors.Vocab'] = function() {}; + constructorTable['gensim.models.keyedvectors.Word2VecKeyedVectors'] = function() {}; + constructorTable['gensim.models.word2vec.Vocab'] = function() {}; + constructorTable['gensim.models.word2vec.Word2Vec'] = function() {}; + constructorTable['lightgbm.sklearn.LGBMRegressor'] = function() {}; + constructorTable['lightgbm.sklearn.LGBMClassifier'] = function() {}; + 
constructorTable['lightgbm.basic.Booster'] = function() {}; + constructorTable['nolearn.lasagne.base.BatchIterator'] = function() {}; + constructorTable['nolearn.lasagne.base.Layers'] = function() {}; + constructorTable['nolearn.lasagne.base.NeuralNet'] = function() {}; + constructorTable['nolearn.lasagne.base.TrainSplit'] = function() {}; + constructorTable['nolearn.lasagne.handlers.PrintLayerInfo'] = function() {}; + constructorTable['nolearn.lasagne.handlers.PrintLog'] = function() {}; + constructorTable['sklearn.calibration._CalibratedClassifier'] = function() {}; + constructorTable['sklearn.calibration._SigmoidCalibration'] = function() {}; + constructorTable['sklearn.calibration.CalibratedClassifierCV​'] = function() {}; + constructorTable['sklearn.compose._column_transformer.ColumnTransformer'] = function() {}; + constructorTable['sklearn.compose._target.TransformedTargetRegressor'] = function() {}; + constructorTable['sklearn.cluster._dbscan.DBSCAN'] = function() {}; + constructorTable['sklearn.decomposition._pca.PCA'] = function() {}; + constructorTable['sklearn.decomposition.PCA'] = function() {}; + constructorTable['sklearn.decomposition.pca.PCA'] = function() {}; + constructorTable['sklearn.decomposition._truncated_svd.TruncatedSVD'] = function() {}; + constructorTable['sklearn.decomposition.truncated_svd.TruncatedSVD'] = function() {}; + constructorTable['sklearn.discriminant_analysis.LinearDiscriminantAnalysis'] = function() {}; + constructorTable['sklearn.dummy.DummyClassifier'] = function() {}; + constructorTable['sklearn.externals.joblib.numpy_pickle.NumpyArrayWrapper'] = constructorTable['joblib.numpy_pickle.NumpyArrayWrapper']; + constructorTable['sklearn.externals.joblib.numpy_pickle.NDArrayWrapper'] = function() {}; + constructorTable['sklearn.ensemble._bagging.BaggingClassifier'] = function() {}; + constructorTable['sklearn.ensemble._forest.RandomForestRegressor'] = function() {}; + 
constructorTable['sklearn.ensemble._forest.RandomForestClassifier'] = function() {}; + constructorTable['sklearn.ensemble._forest.ExtraTreesClassifier'] = function() {}; + constructorTable['sklearn.ensemble._gb_losses.BinomialDeviance'] = function() {}; + constructorTable['sklearn.ensemble._gb_losses.MultinomialDeviance'] = function() {}; + constructorTable['sklearn.ensemble._gb.GradientBoostingClassifier'] = function() {}; + constructorTable['sklearn.ensemble._voting.VotingClassifier'] = function() {}; + constructorTable['sklearn.ensemble.forest.RandomForestClassifier'] = function() {}; + constructorTable['sklearn.ensemble.forest.RandomForestRegressor'] = function() {}; + constructorTable['sklearn.ensemble.forest.ExtraTreesClassifier'] = function() {}; + constructorTable['sklearn.ensemble.gradient_boosting.BinomialDeviance'] = function() {}; + constructorTable['sklearn.ensemble.gradient_boosting.GradientBoostingClassifier'] = function() {}; + constructorTable['sklearn.ensemble.gradient_boosting.LogOddsEstimator'] = function() {}; + constructorTable['sklearn.ensemble.gradient_boosting.MultinomialDeviance'] = function() {}; + constructorTable['sklearn.ensemble.gradient_boosting.PriorProbabilityEstimator'] = function() {}; + constructorTable['sklearn.ensemble.weight_boosting.AdaBoostClassifier'] = function() {}; + constructorTable['sklearn.feature_extraction._hashing.FeatureHasher'] = function() {}; + constructorTable['sklearn.feature_extraction.text.CountVectorizer'] = function() {}; + constructorTable['sklearn.feature_extraction.text.HashingVectorizer'] = function() {}; + constructorTable['sklearn.feature_extraction.text.TfidfTransformer'] = function() {}; + constructorTable['sklearn.feature_extraction.text.TfidfVectorizer'] = function() {}; + constructorTable['sklearn.feature_selection._univariate_selection.SelectKBest'] = function() {}; + constructorTable['sklearn.feature_selection.univariate_selection.SelectKBest'] = function() {}; + 
constructorTable['sklearn.feature_selection.variance_threshold.VarianceThreshold'] = function() {}; + constructorTable['sklearn.impute._base.SimpleImputer'] = function() {}; + constructorTable['sklearn.impute.SimpleImputer'] = function() {}; + constructorTable['sklearn.linear_model._base.LinearRegression'] = function() {}; + constructorTable['sklearn.linear_model._coordinate_descent.ElasticNet'] = function() {}; + constructorTable['sklearn.linear_model.base.LinearRegression'] = function() {}; + constructorTable['sklearn.linear_model.sgd_fast.Hinge'] = function() {}; + constructorTable['sklearn.linear_model.LogisticRegression'] = function() {}; + constructorTable['sklearn.linear_model.logistic.LogisticRegression'] = function() {}; + constructorTable['sklearn.linear_model._logistic.LogisticRegression'] = function() {}; + constructorTable['sklearn.linear_model.LassoLars​'] = function() {}; + constructorTable['sklearn.linear_model.ridge.Ridge'] = function() {}; + constructorTable['sklearn.linear_model.sgd_fast.Log'] = function() {}; + constructorTable['sklearn.linear_model.stochastic_gradient.SGDClassifier'] = function() {}; + constructorTable['sklearn.metrics.scorer._PredictScorer'] = function() {}; + constructorTable['sklearn.model_selection._search.GridSearchCV'] = function() {}; + constructorTable['sklearn.naive_bayes.BernoulliNB'] = function() {}; + constructorTable['sklearn.naive_bayes.ComplementNB'] = function() {}; + constructorTable['sklearn.naive_bayes.GaussianNB'] = function() {}; + constructorTable['sklearn.naive_bayes.MultinomialNB'] = function() {}; + constructorTable['sklearn.neighbors.classification.KNeighborsClassifier'] = function() {}; + constructorTable['sklearn.neighbors.dist_metrics.newObj'] = function() {}; + constructorTable['sklearn.neighbors.kd_tree.newObj'] = function() {}; + constructorTable['sklearn.neighbors.KNeighborsClassifier​'] = function() {}; + constructorTable['sklearn.neighbors.KNeighborsRegressor'] = function() {}; + 
constructorTable['sklearn.neighbors.unsupervised.NearestNeighbors'] = function() {}; + constructorTable['sklearn.neural_network._multilayer_perceptron.MLPClassifier'] = function() {}; + constructorTable['sklearn.neural_network._stochastic_optimizers.AdamOptimizer'] = function() {}; + constructorTable['sklearn.neural_network._stochastic_optimizers.SGDOptimizer'] = function() {}; + constructorTable['sklearn.neural_network.rbm.BernoulliRBM'] = function() {}; + constructorTable['sklearn.neural_network.multilayer_perceptron.MLPClassifier'] = function() {}; + constructorTable['sklearn.neural_network.multilayer_perceptron.MLPRegressor'] = function() {}; + constructorTable['sklearn.neural_network.stochastic_gradient.SGDClassifier'] = function() {}; + constructorTable['sklearn.pipeline.Pipeline'] = function() {}; + constructorTable['sklearn.pipeline.FeatureUnion'] = function() {}; + constructorTable['sklearn.preprocessing._data.RobustScaler'] = function() {}; + constructorTable['sklearn.preprocessing._data.StandardScaler'] = function() {}; + constructorTable['sklearn.preprocessing._discretization.KBinsDiscretizer'] = function() {}; + constructorTable['sklearn.preprocessing._encoders.OneHotEncoder'] = function() {}; + constructorTable['sklearn.preprocessing._function_transformer.FunctionTransformer'] = function() {}; + constructorTable['sklearn.preprocessing._label.LabelBinarizer'] = function() {}; + constructorTable['sklearn.preprocessing._label.LabelEncoder'] = function() {}; + constructorTable['sklearn.preprocessing.data.Binarizer'] = function() {}; + constructorTable['sklearn.preprocessing.data.MaxAbsScaler'] = function() {}; + constructorTable['sklearn.preprocessing.data.MinMaxScaler'] = function() {}; + constructorTable['sklearn.preprocessing.data.Normalizer'] = function() {}; + constructorTable['sklearn.preprocessing.data.OneHotEncoder'] = function() {}; + constructorTable['sklearn.preprocessing.data.PolynomialFeatures'] = function() {}; + 
constructorTable['sklearn.preprocessing.data.PowerTransformer'] = function() {}; + constructorTable['sklearn.preprocessing.data.RobustScaler'] = function() {}; + constructorTable['sklearn.preprocessing.data.QuantileTransformer'] = function() {}; + constructorTable['sklearn.preprocessing.data.StandardScaler'] = function() {}; + constructorTable['sklearn.preprocessing.imputation.Imputer'] = function() {}; + constructorTable['sklearn.preprocessing.label.LabelBinarizer'] = function() {}; + constructorTable['sklearn.preprocessing.label.LabelEncoder'] = function() {}; + constructorTable['sklearn.preprocessing.label.MultiLabelBinarizer'] = function() {}; + constructorTable['sklearn.svm._classes.SVC'] = function() {}; + constructorTable['sklearn.svm.classes.LinearSVC'] = function() {}; + constructorTable['sklearn.svm.classes.SVC'] = function() {}; + constructorTable['sklearn.svm.classes.SVR'] = function() {}; + constructorTable['sklearn.tree._classes.DecisionTreeClassifier'] = function() {}; + constructorTable['sklearn.tree._classes.DecisionTreeRegressor'] = function() {}; + constructorTable['sklearn.tree._classes.ExtraTreeClassifier'] = function() {}; + constructorTable['sklearn.tree._tree.Tree'] = function(n_features, n_classes, n_outputs) { + this.n_features = n_features; + this.n_classes = n_classes; + this.n_outputs = n_outputs; + this.__setstate__ = function(state) { + this.max_depth = state.max_depth; + this.node_count = state.node_count; + this.nodes = state.nodes; + this.values = state.values; + }; + }; + constructorTable['sklearn.tree.tree.DecisionTreeClassifier'] = function() {}; + constructorTable['sklearn.tree.tree.DecisionTreeRegressor'] = function() {}; + constructorTable['sklearn.tree.tree.ExtraTreeClassifier'] = function() {}; + constructorTable['sklearn.utils.deprecation.DeprecationDict'] = function() {}; + constructorTable['xgboost.compat.XGBoostLabelEncoder'] = function() {}; + constructorTable['xgboost.core.Booster'] = function() {}; + 
constructorTable['xgboost.sklearn.XGBClassifier'] = function() {}; + constructorTable['xgboost.sklearn.XGBRegressor'] = function() {}; + + functionTable['copy_reg._reconstructor'] = function(cls, base, state) { + if (base == '__builtin__.object') { + const obj = {}; + sklearn.Utility.applyType(obj, cls); + return obj; + } + if (base == '__builtin__.tuple') { + return state; + } + throw new sklearn.Error("Unknown base type '" + base + "'."); + }; + functionTable['numpy.core.multiarray.scalar'] = function(dtype, rawData) { + let data = rawData; + if (typeof rawData === 'string' || rawData instanceof String) { + data = new Uint8Array(rawData.length); + for (let i = 0; i < rawData.length; i++) { + data[i] = rawData.charCodeAt(i); + } + } + const dataView = new DataView(data.buffer, data.byteOffset, data.byteLength); + switch (dtype.name) { + case 'uint8': + return dataView.getUint8(0); + case 'float32': + return dataView.getFloat32(0, true); + case 'float64': + return dataView.getFloat64(0, true); + case 'int8': + return dataView.getInt8(0, true); + case 'int16': + return dataView.getInt16(0, true); + case 'int32': + return dataView.getInt32(0, true); + case 'int64': + return new long.Long(dataView.getInt32(0, true), dataView.getInt32(4, true), false); + } + throw new sklearn.Error("Unknown scalar type '" + dtype.name + "'."); + }; + functionTable['numpy.ma.core._mareconstruct'] = function(subtype /* , baseclass, baseshape, basetype */) { + // _data = ndarray.__new__(baseclass, baseshape, basetype) + // _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) + // return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) + const obj = {}; + sklearn.Utility.applyType(obj, subtype); + return obj; + }; + functionTable['numpy.random.__RandomState_ctor'] = function() { + return {}; + }; + functionTable['numpy.random._pickle.__randomstate_ctor'] = function() { + return {}; + }; + functionTable['_codecs.encode'] = function(obj /*, econding */) { + 
return obj; + }; + functionTable['collections.defaultdict'] = function(/* default_factory */) { + return {}; + }; + functionTable['collections.OrderedDict'] = function(args) { + const obj = new Map(); + obj.__setitem__ = function(key, value) { + obj.set(key, value); + }; + if (args) { + for (const arg of args) { + obj.__setitem__(arg[0], arg[1]); + } + } + return obj; + }; + functionTable['__builtin__.bytearray'] = function(source, encoding /*, errors */) { + if (source) { + if (encoding === 'latin-1') { + const array = new Uint8Array(source.length); + for (let i = 0; i < source.length; i++) { + array[i] = source.charCodeAt(i); + } + return array; + } + throw new sklearn.Error("Unsupported bytearray encoding '" + JSON.stringify(encoding) + "'."); + } + return []; + }; + functionTable['__builtin__.bytes'] = function(source, encoding /*, errors */) { + if (source) { + if (encoding === 'latin-1') { + const array = new Uint8Array(source.length); + for (let i = 0; i < source.length; i++) { + array[i] = source.charCodeAt(i); + } + return array; + } + throw new sklearn.Error("Unsupported bytearray encoding '" + JSON.stringify(encoding) + "'."); + } + return []; + }; + functionTable['builtins.bytearray'] = function(data) { + return { data: data }; + }; + functionTable['builtins.set'] = function(iterable) { + return iterable ? 
iterable : []; + }; + functionTable['builtins.slice'] = function(start, stop, step) { + return { start: start, stop: stop, step: step }; + }; + functionTable['cloudpickle.cloudpickle._builtin_type'] = function(name) { + return name; + }; + + const unknownNameMap = new Set(); + const knownPackageMap = new Set([ + 'sklearn', 'collections', '__builtin__', 'builtins', + 'copy_reg', 'gensim', 'joblib','xgboost', 'lightgbm', 'nolearn', 'numpy' + ]); + + const function_call = (name, args) => { + const func = functionTable[name]; + if (func) { + return func.apply(null, args); + } + const obj = {}; + sklearn.Utility.applyType(obj, name); + const constructor = constructorTable[name]; + if (constructor) { + constructor.apply(obj, args); + } + else if (name && !unknownNameMap.has(name)) { + unknownNameMap.add(name); + if (knownPackageMap.has(name.split('.').shift())) { + exception(new sklearn.Error("Unknown function '" + name + "'."), false); + } + } + return obj; + }; + + this._data = unpickler.load(function_call, null); + + const find_weights = function(objs) { + + for (const dict of objs) { + if (dict && !Array.isArray(dict)) { + const weights = new Map(); + for (const key in dict) { + const value = dict[key]; + if (key != 'weight_order' && key != 'lr') { + if (!key || !sklearn.Utility.isTensor(value)) { + return null; + } + weights.set(key, value); + } + } + return weights; + } + } + + for (const list of objs) { + if (list && Array.isArray(list)) { + const weights = new Map(); + for (let i = 0; i < list.length; i++) { + const value = list[i]; + if (!sklearn.Utility.isTensor(value, 'numpy.ndarray')) { + return null; + } + weights.set(i.toString(), value); + } + return weights; + } + } + return null; + }; + + if (this._data) { + this._weights = find_weights([ this._data, this._data.blobs ]); + if (this._weights) { + this._data = null; + } + } + } + + get data() { + return this._data; + } + + get weights() { + return this._weights; + } +}; + +sklearn.Utility = class { + + 
static isTensor(obj) { + return obj && obj.__module__ === 'numpy' && obj.__name__ === 'ndarray'; + } + + static applyType(obj, name){ + const parts = name.split('.'); + obj.__name__ = parts.pop(); + obj.__module__ = parts.join('.'); + } +}; + +sklearn.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading scikit-learn model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = sklearn.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/tar.js b/frontend/packages/core/public/netron/tar.js new file mode 100644 index 00000000..5235a57c --- /dev/null +++ b/frontend/packages/core/public/netron/tar.js @@ -0,0 +1,117 @@ +/* jshint esversion: 6 */ + +var tar = tar || {}; + +tar.Archive = class { + + constructor(buffer) { + this._entries = []; + const reader = new tar.Reader(buffer, 0, buffer.length); + while (reader.peek()) { + this._entries.push(new tar.Entry(reader)); + if (reader.match(512, 0)) { + break; + } + } + } + + get entries() { + return this._entries; + } +}; + +tar.Entry = class { + + constructor(reader) { + const header = reader.bytes(512); + reader.skip(-512); + let sum = 0; + for (let i = 0; i < header.length; i++) { + sum += (i >= 148 && i < 156) ? 32 : header[i]; + } + this._name = reader.string(100); + reader.string(8); // file mode + reader.string(8); // owner + reader.string(8); // group + const size = parseInt(reader.string(12).trim(), 8); // size + reader.string(12); // timestamp + const checksum = parseInt(reader.string(8).trim(), 8); // checksum + if (isNaN(checksum) || sum != checksum) { + throw new tar.Error('Invalid tar archive.'); + } + reader.string(1); // link indicator + reader.string(100); // name of linked file + reader.bytes(255); + this._data = reader.bytes(size); + reader.bytes(((size % 512) != 0) ? 
(512 - (size % 512)) : 0); + } + + get name() { + return this._name; + } + + get data() { + return this._data; + } +}; + +tar.Reader = class { + + constructor(buffer) { + this._buffer = buffer; + this._position = 0; + this._end = buffer.length; + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new tar.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.'); + } + } + + peek() { + return this._position < this._end; + } + + match(size, value) { + if (this._position + size <= this._end) { + if (this._buffer.subarray(this._position, this._position + size).every((c) => c == value)) { + this._position += size; + return true; + } + } + return false; + } + + bytes(size) { + const position = this._position; + this.skip(size); + return this._buffer.subarray(position, this._position); + } + + string(size) { + const buffer = this.bytes(size); + let position = 0; + let str = ''; + for (let i = 0; i < size; i++) { + let c = buffer[position++]; + if (c == 0) { + break; + } + str += String.fromCharCode(c); + } + return str; + } +}; + +tar.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'tar Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Archive = tar.Archive; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/tengine-metadata.json b/frontend/packages/core/public/netron/tengine-metadata.json new file mode 100755 index 00000000..9bceff52 --- /dev/null +++ b/frontend/packages/core/public/netron/tengine-metadata.json @@ -0,0 +1,1044 @@ +[ + { + "name": "Accuracy", + "schema": { + } + }, + { + "name": "BatchNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "rescale_factor", "type": "float32", "default": 1.0 }, + { "name": "eps", "type": "float32", "default": 1e-5 }, + { "name": 
"caffe_flavor", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" }, + { "name": "mean" }, + { "name": "var" } + ] + } + }, + { + "name": "BilinearResize", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "scale_x", "type": "float32", "default": 0 }, + { "name": "scale_y", "type": "float32", "default": 0 }, + { "name": "type", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Concat", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 1 } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + } + }, + { + "name": "Const", + "schema": { + } + }, + { + "name": "Convolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "kernel_h", "type": "int32", "default": 1 }, + { "name": "kernel_w", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "dilation_h", "type": "int32", "default": 1, "visible": false }, + { "name": "dilation_w", "type": "int32", "default": 1, "visible": false }, + { "name": "input_channel", "type": "int32", "default": 1 }, + { "name": "output_channel", "type": "int32", "default": 1 }, + { "name": "group", "type": "int32", "default": 1, "visible": false }, + { "name": "activation","type": "int32", "default": -1 }, + { "name": "pad_h0", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_w0", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_h1", "type": "int32", "default": 0, "visible": false }, + { "name": "pad_w1", "type": "int32", "default": 0, "visible": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + } + }, + { + "name": "Deconvolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 1 }, + { "name": "kernel_h", 
"type": "int32", "default": 1 }, + { "name": "kernel_w", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "pad_w0", "type": "int32", "default": 0 }, + { "name": "pad_h0", "type": "int32", "default": 0 }, + { "name": "pad_w1", "type": "int32", "default": 0 }, + { "name": "pad_h1", "type": "int32", "default": 0 }, + { "name": "dilation_h", "type": "int32", "default": 1 }, + { "name": "dilation_w", "type": "int32", "default": 1 }, + { "name": "group", "type": "int32", "default": 1 }, + { "name": "activation","type": "int32", "default": -1 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + } + }, + { + "name": "DetectionOutput", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "num_classes", "type": "int32", "default": 0 }, + { "name": "keep_top_k", "type": "int32", "default": 0 }, + { "name": "nms_top_k", "type": "int32", "default": 0 }, + { "name": "confidence_threshold", "type": "float32", "default": 0 }, + { "name": "nms_threshold", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "DropOut", + "schema": { + "category": "Dropout" + } + }, + { + "name": "Eltwise", + "schema": { + "attributes": [ + { "name": "type", "type": "uint32", "default": 0 }, + { "name": "caffe_flavor", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "inputs", "option": "variadic" } + ] + } + }, + { + "name": "Flatten", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "end_axis", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "FullyConnected", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 10 } + ], + "inputs": [ + { "name": "input" }, + { "name": "weight" }, + { "name": "bias" } + ] + } + }, + { + "name": "LRN", + "schema": { + "category": "Normalization", 
+ "attributes": [ + { "name": "local_size", "type": "int32", "default": 0 }, + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 }, + { "name": "norm_region", "type": "int32", "default": 0 }, + { "name": "k", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "Normalize", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "across_spatial", "type": "int32", "default": 0 }, + { "name": "channel_shared", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Permute", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "flag", "type": "int32", "default": 0 }, + { "name": "order0", "type": "int32", "default": 0 }, + { "name": "order1", "type": "int32", "default": 0 }, + { "name": "order2", "type": "int32", "default": 0 }, + { "name": "order3", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Pooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "alg", "type": "int32", "default": 0 }, + { "name": "kernel_h", "type": "int32", "default": 0 }, + { "name": "kernel_w", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "stride_w", "type": "int32", "default": 0 }, + { "name": "global", "type": "int32", "default": 0 }, + { "name": "caffe_flavor", "type": "int32", "default": 0 }, + { "name": "pad_h0", "type": "int32", "default": 0 }, + { "name": "pad_w0", "type": "int32", "default": 0 }, + { "name": "pad_h1", "type": "int32", "default": 0 }, + { "name": "pad_w1", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Prelu", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" }, + { "name": "slope" } + ] + } + }, + { + "name": "PriorBox", + "schema": { + "attributes": [ + { "name": "min_size", "type": "float32[]", "default": [] }, + { "name": "max_size", "type": "float32[]", "default": [] }, + { "name": "variance", "type": "float32[]", "default": [] }, + { 
"name": "aspect_ratio", "type": "float32[]", "default": [] }, + { "name": "flip", "type": "int32", "default": 0 }, + { "name": "clip", "type": "int32", "default": 0 }, + { "name": "img_size", "type": "int32", "default": 0 }, + { "name": "img_h", "type": "int32", "default": 0 }, + { "name": "img_w", "type": "int32", "default": 0 }, + { "name": "step_w", "type": "float32", "default": 0 }, + { "name": "step_h", "type": "float32", "default": 0 }, + { "name": "offset", "type": "float32", "default": 0 }, + { "name": "num_priors", "type": "int32", "default": 0 }, + { "name": "out_dim", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Region", + "schema": { + "attributes": [ + { "name": "num_classes", "type": "int32", "default": 0 }, + { "name": "side", "type": "int32", "default": 0 }, + { "name": "num_box", "type": "int32", "default": 0 }, + { "name": "coords", "type": "int32", "default": 0 }, + { "name": "confidence_threshold", "type": "float32", "default": 0 }, + { "name": "nms_threshold", "type": "float32", "default": 0 }, + { "name": "biases", "type": "float32[]", "default": [] } + ] + } + }, + { + "name": "ReLU", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "negative_slope", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "ReLU6", + "schema": { + "category": "Activation" + } + }, + { + "name": "Reorg", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "stride", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Reshape", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "dim_0", "type": "int32", "default": 0 }, + { "name": "dim_1", "type": "int32", "default": 0 }, + { "name": "dim_2", "type": "int32", "default": 0 }, + { "name": "dim_3", "type": "int32", "default": 0 }, + { "name": "dim_size", "type": "int32", "default": 0 }, + { "name": "axis", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ] + } + }, + { + "name": 
"Reshape", + "version": 2, + "schema": { + "category": "Shape", + "attributes": [ + { "name": "is_mxnet", "type": "int32", "default": 0 }, + { "name": "reverse", "type": "int32", "default": 0 }, + { "name": "shape", "type": "int32[]", "default": [] } + ], + "inputs": [ + { "name": "input" }, + { "name": "shape" } + ] + } + }, + { + "name": "RoiPooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "pooled_h", "type": "int32", "default": 0 }, + { "name": "pooled_w", "type": "int32", "default": 0 }, + { "name": "spatial_scale", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "RPN", + "schema": { + "attributes": [ + { "name": "ratios", "type": "float32[]", "default": [] }, + { "name": "anchor_scales", "type": "float32[]", "default": [] }, + { "name": "feat_stride", "type": "int32", "default": 0 }, + { "name": "basesize", "type": "int32", "default": 0 }, + { "name": "min_size", "type": "int32", "default": 0 }, + { "name": "per_nms_topn", "type": "int32", "default": 0 }, + { "name": "post_nms_topn", "type": "int32", "default": 0 }, + { "name": "nms_thresh", "type": "float32", "default": 0 }, + { "name": "anchors", "default": 0 } + ] + } + }, + { + "name": "Scale", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "num_axes", "type": "int32", "default": 0 }, + { "name": "bias_term", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "gamma" }, + { "name": "beta" } + ] + } + }, + { + "name": "Slice", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "slice_points", "type": "int32[]", "default": [] }, + { "name": "begins", "type": "int32[]", "default": [] }, + { "name": "sizes", "type": "int32[]", "default": [] }, + { "name": "iscaffe", "type": "int32", "default": 0 }, + { "name": "ismxnet", "type": "int32", "default": 0 }, + { "name": "isonnx", "type": "int32", 
"default": 0 }, + { "name": "begin", "type": "int32", "default": 0 }, + { "name": "end", "type": "int32", "default": 0 } + ], + "outputs": [ + { "name": "outputs", "option": "variadic" } + ] + } + }, + { + "name": "SoftMax", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Split", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "split_dim", "type": "int32", "default": 0 }, + { "name": "is_caffe", "type": "boolean", "default": false }, + { "name": "is_onnx", "type": "boolean", "default": false }, + { "name": "split_sizes", "type": "int32[]", "default": [] } + ] + } + }, + { + "name": "DetectionPostProcess", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "max_detections", "type": "int32", "default": 0 }, + { "name": "max_classes_per_detection", "type": "int32", "default": 0 }, + { "name": "nms_score_threshold", "type": "float32", "default": 0 }, + { "name": "nms_iou_threshold", "type": "float32", "default": 0 }, + { "name": "num_classes", "type": "int32", "default": 0 }, + { "name": "scales", "type": "float32[]", "default": [] } + ], + "inputs": [ + { "name": "input" }, + { "name": "score" }, + { "name": "anchor" } + ], + "outputs": [ + { "name": "detect_boxes" }, + { "name": "detect_classes" }, + { "name": "detect_scores" }, + { "name": "detect_num" } + ] + } + }, + { + "name": "Gemm", + "schema": { + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 }, + { "name": "transA", "type": "int32", "default": 0 }, + { "name": "transB", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Generic", + "schema": { + "attributes": [ + { "name": "max_input_num", "type": "int32", "default": 0 }, + { "name": "max_output_num", "type": "int32", "default": 0 }, + { "name": "opname", "type": "string", "default": "" } + ] + } 
+ }, + { + "name": "Logistic", + "schema": { + "category": "Activation" + } + }, + { + "name": "LSTM", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "forget_bias", "type": "float32", "default": 0 }, + { "name": "clip", "type": "float32", "default": 0 }, + { "name": "output_len", "type": "int32", "default": 0 }, + { "name": "sequence_len", "type": "int32", "default": 0 }, + { "name": "input_size", "type": "int32", "default": 0 }, + { "name": "hidden_size", "type": "int32", "default": 0 }, + { "name": "cell_size", "type": "int32", "default": 0 }, + { "name": "has_peephole", "type": "int32", "default": 0 }, + { "name": "has_projection", "type": "int32", "default": 0 }, + { "name": "has_clip", "type": "int32", "default": 0 }, + { "name": "has_bias", "type": "int32", "default": 0 }, + { "name": "has_init_state", "type": "int32", "default": 0 }, + { "name": "forget_act", "type": "int32", "default": 0 }, + { "name": "input_act", "type": "int32", "default": 0 }, + { "name": "output_act", "type": "int32", "default": 0 }, + { "name": "cellin_act", "type": "int32", "default": 0 }, + { "name": "cellout_act", "type": "int32", "default": 0 }, + { "name": "mxnet_flag", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "RNN", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "clip", "type": "float32", "default": 0 }, + { "name": "output_len", "type": "int32", "default": 0 }, + { "name": "sequence_len", "type": "int32", "default": 0 }, + { "name": "input_size", "type": "int32", "default": 0 }, + { "name": "hidden_size", "type": "int32", "default": 0 }, + { "name": "has_clip", "type": "int32", "default": 0 }, + { "name": "has_bias", "type": "int32", "default": 0 }, + { "name": "has_init_state", "type": "int32", "default": 0 }, + { "name": "activation", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "TanH", + "schema": { + "category": "Activation" + } + }, + { + "name": "Sigmoid", + "schema": { + "category": "Activation" + } 
+ }, + { + "name": "Squeeze", + "schema": { + "category": "Transform", + "attributes": [ + { "name": "dim_0", "type": "int32", "default": 0 }, + { "name": "dim_1", "type": "int32", "default": 0 }, + { "name": "dim_2", "type": "int32", "default": 0 }, + { "name": "dim_3", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "FusedbnScaleRelu", + "schema": { + "category": "Activation" + } + }, + { + "name": "Pad", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "pad_n_0", "type": "int32", "default": -1 }, + { "name": "pad_n_1", "type": "int32", "default": -1 }, + { "name": "pad_c_0", "type": "int32", "default": -1 }, + { "name": "pad_c_1", "type": "int32", "default": -1 }, + { "name": "pad_h_0", "type": "int32", "default": -1 }, + { "name": "pad_h_1", "type": "int32", "default": -1 }, + { "name": "pad_w_0", "type": "int32", "default": -1 }, + { "name": "pad_w_1", "type": "int32", "default": -1 }, + { "name": "mode", "type": "int32", "default": 0 }, + { "name": "value", "type": "float32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "paddings" } + ] + } + }, + { + "name": "StridedSlice", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "begine_n", "type": "int32", "default": 0 }, + { "name": "end_n", "type": "int32", "default": 0 }, + { "name": "stride_n", "type": "int32", "default": 0 }, + { "name": "begine_c", "type": "int32", "default": 0 }, + { "name": "end_c", "type": "int32", "default": 0 }, + { "name": "stride_c", "type": "int32", "default": 0 }, + { "name": "begine_h", "type": "int32", "default": 0 }, + { "name": "end_h", "type": "int32", "default": 0 }, + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "begine_w", "type": "int32", "default": 0 }, + { "name": "end_w", "type": "int32", "default": 0 }, + { "name": "stride_w", "type": "int32", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "strides" } + ] + } + }, 
+ { + "name": "ArgMax", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "ArgMin", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "TopKV2", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "k", "type": "int32", "default": 0 }, + { "name": "sorted", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Reduction", + "schema": { + "attributes": [ + { "name": "dim_0", "type": "int32", "default": -2 }, + { "name": "dim_1", "type": "int32", "default": -2 }, + { "name": "dim_2", "type": "int32", "default": -2 }, + { "name": "dim_3", "type": "int32", "default": -2 }, + { "name": "type", "type": "int32", "default": 0 }, + { "name": "keepdim", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Max", + "schema": { + "category": "Layer" + } + }, + { + "name": "Min", + "schema": { + "category": "Layer" + } + }, + { + "name": "GRU", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "clip", "type": "float32", "default": 0 }, + { "name": "output_len", "type": "int32", "default": 0 }, + { "name": "sequence_len", "type": "int32", "default": 0 }, + { "name": "input_size", "type": "int32", "default": 0 }, + { "name": "hidden_size", "type": "int32", "default": 0 }, + { "name": "has_clip", "type": "int32", "default": 0 }, + { "name": "has_gate_bias", "type": "int32", "default": 0 }, + { "name": "has_candidate_bias", "type": "int32", "default": 0 }, + { "name": "has_init_state", "type": "int32", "default": 0 }, + { "name": "mxnet_flag", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Addn", + "schema": { + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "SwapAxis", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "dim_0", "type": "int32", "default": 0 }, + { "name": "dim_1", "type": "int32", 
"default": 0 } + ] + } + }, + { + "name": "Upsample", + "schema": { + "category": "Data", + "attributes": [ + { "name": "scale", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "SpaceToBatchND", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "dilation_x", "type": "int32", "default": 0 }, + { "name": "dilation_y", "type": "int32", "default": 0 }, + { "name": "pad_top", "type": "int32", "default": 0 }, + { "name": "pad_bottom", "type": "int32", "default": 0 }, + { "name": "pad_left", "type": "int32", "default": 0 }, + { "name": "pad_right", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "BatchToSpaceND", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "dilation_x", "type": "int32", "default": 0 }, + { "name": "dilation_y", "type": "int32", "default": 0 }, + { "name": "crop_top", "type": "int32", "default": 0 }, + { "name": "crop_bottom", "type": "int32", "default": 0 }, + { "name": "crop_left", "type": "int32", "default": 0 }, + { "name": "crop_right", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Resize", + "schema": { + "category": "Data", + "attributes": [ + { "name": "scale_x", "type": "float32", "default": 0 }, + { "name": "scale_y", "type": "float32", "default": 0 }, + { "name": "type", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "ShuffleChannel", + "schema": { + "category": "shape", + "attributes": [ + { "name": "group", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Crop", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "num_args", "type": "int32", "default": 0 }, + { "name": "offset_c", "type": "int32", "default": 0 }, + { "name": "offset_h", "type": "int32", "default": 0 }, + { "name": "offset_w", "type": "int32", "default": 0 }, + { "name": "crop_h", "type": "int32", "default": 0 }, + { "name": "crop_w", "type": "int32", "default": 0 }, + { "name": "center_crop", "type": "bool", "default": 0 }, + { "name": "axis", "type": "int32", 
"default": 0 }, + { "name": "flag", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "ROIAlign", + "schema": { + "attributes": [ + { "name": "pooled_width", "type": "int32", "default": 0 }, + { "name": "pooled_height", "type": "int32", "default": 0 }, + { "name": "spatial_scale", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "Psroipooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "pooled_w", "type": "int32", "default": 0 }, + { "name": "pooled_h", "type": "int32", "default": 0 }, + { "name": "spatial_scale", "type": "float32", "default": 0 }, + { "name": "output_dim", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Unary", + "schema": { + "attributes": [ + { "name": "type", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Expanddims", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Bias", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "bias_size", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Noop", + "schema": { + "category": "Layer" + } + }, + { + "name": "Threshold", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "Threshold", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "Hardsigmoid", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "Embed", + "schema": { + "category": "Transform", + "attributes": [ + { "name": "num_output", "type": "int32", "default": 0 }, + { "name": "input_dim", "type": "int32", "default": 0 }, + { "name": "bias_term", "type": "int32", "default": 0 }, + { "name": "weight_data_size", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "InstanceNorm", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "eps", "type": "float32", "default": 
0 } + ] + } + }, + { + "name": "MVN", + "schema": { + "attributes": [ + { "name": "across_channels", "type": "int32", "default": 0 }, + { "name": "normalize_variance", "type": "int32", "default": 0 }, + { "name": "eps", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "Absval", + "schema": { + "category": "Data" + } + }, + { + "name": "Cast", + "schema": { + "attributes": [ + { "name": "type_from", "type": "int32", "default": 0 }, + { "name": "type_to", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "HardSwish", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "beta", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "Interp", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "resize_type", "type": "int32", "default": 0 }, + { "name": "width_scale", "type": "float32", "default": 0 }, + { "name": "height_scale", "type": "float32", "default": 0 }, + { "name": "output_width", "type": "int32", "default": 0 }, + { "name": "output_height", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "SELU", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 }, + { "name": "lambda", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "ELU", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "alpha", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "BroadMul", + "schema": { + "category": "Layer" + } + }, + { + "name": "Logical", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "type", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Gather", + "schema": { + "category": "Data", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "indices_num", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Transpose", + "schema": { + "category": "Transform", + "attributes": [ + { "name": 
"shape", "type": "int32[]", "default": [] } + ] + } + }, + { + "name": "Comparison", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "type", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "SpaceToDepth", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "DepthToSpace", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "block_size", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Reverse", + "schema": { + "category": "Shape" + } + }, + { + "name": "SparseToDense", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "output_shape_size0", "type": "int32", "default": 0 }, + { "name": "output_shape_size1", "type": "int32", "default": 0 }, + { "name": "default_value", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Ceil", + "schema": { + "category": "Layer" + } + }, + { + "name": "SquaredDifference", + "schema": { + "category": "Layer" + } + }, + { + "name": "Round", + "schema": { + "category": "Layer" + } + }, + { + "name": "ZerosLike", + "schema": { + "category": "Layer" + } + }, + { + "name": "Clip", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "max", "type": "float32", "default": 0 }, + { "name": "min", "type": "float32", "default": 0 } + ] + } + }, + { + "name": "MatMul", + "schema": { + "category": "Layer" + } + }, + { + "name": "ReduceL2", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "axis", "type": "int32", "default": 0 }, + { "name": "keepdim", "type": "int32", "default": 0 } + ] + } + }, + { + "name": "Unsqueeze", + "schema": { + "category": "Transform", + "attributes": [ + { "name": "axises[]", "type": "int32[]", "default": [] } + ] + } + }, + { + "name": "Num", + "schema": { + "category": "Layer" + } + } +] diff --git a/frontend/packages/core/public/netron/tengine.js b/frontend/packages/core/public/netron/tengine.js new file mode 100755 index 
00000000..600c4f19 --- /dev/null +++ b/frontend/packages/core/public/netron/tengine.js @@ -0,0 +1,964 @@ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var tengine = tengine || {}; +var base = base || require('./base'); + +tengine.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'tmfile') { + const buffer = context.buffer; + if (buffer.length > 4) { + const majorVersion = buffer[0] | buffer[1] << 8 ; + if (majorVersion < 4) { + return true; + } + } + } + return false; + } + + open(context, host) { + return tengine.Metadata.open(host).then((metadata) => { + const identifier = context.identifier.toLowerCase(); + try { + const buffer = context.buffer; + const majorVersion = buffer[0] | buffer[1] << 8; + const minorVersion = buffer[2] | buffer[3] << 8; + if (majorVersion !== 2) { + throw new tengine.Error("Unsupported format version 'v" + majorVersion.toString() + "." + minorVersion.toString() + "'."); + } + return new tengine.Model(metadata, buffer); + } + catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new tengine.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } +}; + +tengine.Model = class { + + constructor(metadata, buffer) { + const reader = new tengine.ModelFileReader(buffer); + this._version = reader.version; + this._source = reader.source; + this._graphs = reader.graphs.map((graph) => new tengine.Graph(metadata, graph)); + } + + get format() { + return "Tengine v" + this._version; + } + + get source() { + return this._source; + } + + get graphs() { + return this._graphs; + } +}; + +tengine.Graph = class { + + constructor(metadata, graph) { + this._name = graph.id.toString(); + this._inputs = []; + this._outputs = []; + this._nodes = []; + + const tensors = graph.tensors.map((tensor) => new tengine.Argument(tensor)); + + for (const input of graph.inputs) { + const argument = tensors[input]; + this._inputs.push(new tengine.Parameter(argument.name, true, [ argument ])); + } + + for (const output of graph.outputs) { + const argument = tensors[output]; + if (argument.type && argument.type.shape && argument.type.shape.dimensions && argument.type.shape.dimensions.length == 0 && argument.initializer !== null) { + continue; + } + this._outputs.push(new tengine.Parameter(argument.name, true, [ argument ])); + } + + for (const node of graph.nodes) { + if (node.type !== 'INPUT') { + this._nodes.push(new tengine.Node(metadata, node, tensors)); + } + } + } + + get name() { + return this._name; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +tengine.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +tengine.Argument = class { + + constructor(tensor) { + this._name = 
tensor.name; + this._type = new tengine.TensorType(tensor.dataType, new tengine.TensorShape(tensor.dims)); + this._initializer = (tensor.type === 2) ? new tengine.Tensor(this._type, tensor.buffer) : null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get quantization() { + return null; + } + + get initializer() { + return this._initializer; + } +}; + + +tengine.Node = class { + + constructor(metadata, node, tensors) { + this._metadata = metadata; + this._name = node.name; + this._type = node.type + (node.version && node.version !== 1 ? ':' + node.version.toString() : ''); + this._inputs = []; + this._outputs = []; + this._attributes = []; + + const schema = metadata.type(this._type); + + for (let i = 0; i < node.params.length; i++) { + const attributeSchema = (schema && schema.attributes && i < schema.attributes.length) ? schema.attributes[i] : null; + const attributeName = attributeSchema ? attributeSchema.name : i.toString(); + this._attributes.push(new tengine.Attribute(attributeSchema, attributeName, node.params[i])); + } + + const inputs = node.inputs; + let inputIndex = 0; + if (schema && schema.inputs) { + for (const inputDef of schema.inputs) { + if (inputIndex < inputs.length || inputDef.option != 'optional') { + const inputCount = (inputDef.option == 'variadic') ? (inputs.length - inputIndex) : 1; + const inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).filter((id) => id != '' || inputDef.option != 'optional').map((id) => tensors[id]); + this._inputs.push(new tengine.Parameter(inputDef.name, true, inputArguments)); + inputIndex += inputCount; + } + } + } + else { + this._inputs = this._inputs.concat(inputs.slice(inputIndex).map((id, index) => { + const inputName = ((inputIndex + index) == 0) ? 
'input' : (inputIndex + index).toString(); + return new tengine.Parameter(inputName, true, [ tensors[id] ]); + })); + } + + const outputs = node.outputs; + let outputIndex = 0; + if (schema && schema.outputs) { + for (const outputDef of schema.outputs) { + if (outputIndex < outputs.length || outputDef.option != 'optional') { + const outputCount = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1; + const outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => tensors[id]); + this._outputs.push(new tengine.Parameter(outputDef.name, true, outputArguments)); + outputIndex += outputCount; + } + } + } + else { + this._outputs = this._outputs.concat(outputs.slice(outputIndex).map((id, index) => { + const outputName = ((outputIndex + index) == 0) ? 'output' : (outputIndex + index).toString(); + return new tengine.Parameter(outputName, true, [ tensors[id] ]); + })); + } + } + + get type() { + return this._type.split(':')[0]; + } + + get name() { + return this._name; + } + + get metadata() { + return this._metadata.type(this._type); + } + + get attributes() { + return this._attributes; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } +}; + +tengine.Attribute = class { + + constructor(schema, key, value) { + this._type = ''; + this._name = key; + this._value = value; + if (schema) { + this._name = schema.name; + if (schema.type) { + this._type = schema.type; + } + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (this._value == schema.default || (this._value && this._value.toString() == schema.default.toString())) { + this._visible = false; + } + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +tengine.Tensor = class { + + constructor(type, data, kind) { + this._type = type; + this._data = data; + this._kind = kind; + } + + get kind() { + return this._kind; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state || null; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.index = 0; + context.count = 0; + context.state = null; + + if (this._type.dataType == '?') { + context.state = 'Tensor has unknown data type.'; + return context; + } + if (!this._type.shape || (this._type.shape.dimensions && this._type.shape.dimensions.length == 0)) { + context.state = 'Tensor has no dimensions.'; + return context; + } + + if (!this._data) { + context.state = 'Tensor data is empty.'; + return context; + } + + switch (this._type.dataType) { + case 'int8': + case 'uint8': + case 'float16': + case 'float32': + case 'int32': + case 'int16': + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + break; + default: + context.state = 'Tensor data type is not implemented.'; + break; + } + + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + return context; + } + + _decode(context, dimension) { + const shape = context.shape.length == 0 ? 
[ 1 ] : context.shape; + const results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (this._type.dataType) { + case 'float32': + results.push(context.data.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'float16': + results.push(context.data.getFloat16(context.index, true)); + context.index += 2; + context.count++; + break; + case 'int8': + results.push(context.data.getInt8(context.index, true)); + context.index += 1; + context.count++; + break; + case 'uint8': + results.push(context.data.getUint8(context.index, true)); + context.index += 1; + context.count++; + break; + case 'int32': + results.push(context.data.getInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'int16': + results.push(context.data.getInt16(context.index, true)); + context.index += 2; + context.count++; + break; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } +}; + +tengine.TensorType = class { + + constructor(dataType, shape) { + switch (dataType) { + case 0: this._dataType = 'float32'; break; + case 1: this._dataType = 'float16'; break; + case 2: this._dataType = 'int8'; break; + case 3: this._dataType = 'uint8'; break; + case 4: this._dataType = 'int32'; break; + case 5: this._dataType = 'int16'; break; + default: throw new tengine.Error("Unknown data type'" + dataType + "'."); + } + this._shape = shape; + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return this._dataType + this._shape.toString(); + } +}; + +tengine.TensorShape = class { + + 
constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + return this._dimensions ? ('[' + this._dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',') + ']') : ''; + } +}; + +tengine.Metadata = class { + + static open(host) { + if (tengine.Metadata._metadata) { + return Promise.resolve(tengine.Metadata._metadata); + } + return host.request(null, 'tengine-metadata.json', 'utf-8').then((data) => { + tengine.Metadata._metadata = new tengine.Metadata(data); + return tengine.Metadata._metadata; + }).catch(() => { + tengine.Metadata._metadata = new tengine.Metadata(null); + return tengine.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + const name = item.name + (item.version && item.version !== 1 ? 
':' + item.version.toString() : ''); + this._map[name] = item.schema; + } + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +tengine.ModelFileReader = class { + + constructor(buffer) { + + // ./third_party/src/tengine/serializer/include/tengine/v2/tm2_format.h + // https://github.com/OAID/Tengine/wiki/The-format-of-tmfile + + const types = new Map(); + const register = (index, version, name, params) => { + types.set(index.toString() + ':' + version.toString(), { name: name, params: params }); + }; + register( 0, 1, 'Accuracy', []); + register( 1, 1, 'BatchNormalization', [ 'f', 'f', 'i' ]); + register( 2, 1, 'BilinearResize', [ 'f', 'f', 'i' ]); + register( 3, 1, 'Concat', [ 'i' ]); + register( 4, 1, 'Const', []); + register( 5, 1, 'Convolution', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]); + register( 6, 1, 'DeConvolution', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]); + register( 7, 1, 'DetectionOutput', [ 'i', 'i', 'i', 'f', 'f' ]); + register( 8, 1, 'DropOut', []); + register( 9, 1, 'Eltwise', [ 'i', 'i' ]); + register(10, 1, 'Flatten', [ 'i' ]); + register(11, 1, 'FullyConnected', [ 'i' ]); + register(12, 1, 'INPUT', []); + register(13, 1, 'LRN', [ 'i', 'f', 'f', 'i', 'f' ]); + register(14, 1, 'Normalize', [ 'i', 'i' ]); + register(15, 1, 'Permute', [ 'i', 'i', 'i', 'i', 'i' ]); + register(16, 1, 'Pooling', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(17, 1, 'Prelu', []); + register(18, 1, 'PriorBox', [ 'f[]', 'f[]', 'f[]', 'f[]', 'i', 'i', 'i', 'i', 'i', 'f', 'f', 'f', 'i', 'i' ]); + register(19, 1, 
'Region', [ 'i', 'i', 'i', 'i', 'f', 'f', 'f[]' ]); + register(20, 1, 'ReLU', [ 'f' ]); + register(21, 1, 'ReLU6', []); + register(22, 1, 'Reorg', [ 'i' ]); + register(23, 1, 'Reshape', [ 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(23, 2, 'Reshape', [ 'i', 'i', 'i[]' ]); + register(24, 1, 'RoiPooling', [ 'i', 'i', 'f' ]); + register(25, 1, 'RPN', [ 'f[]', 'f[]', 'i', 'i', 'i', 'i', 'i', 'f', 'anchors' ]); + register(26, 1, 'Scale', [ 'i', 'i', 'i' ]); + register(27, 1, 'Slice', [ 'i', 'i[]', 'i[]', 'i[]', 'i', 'i', 'i', 'i', 'i' ]); + register(28, 1, 'SoftMax', [ 'i' ]); + register(29, 1, 'Split', [ 'i', 'i', 'boolean', 'boolean', 'i[]' ]); + register(30, 1, 'DetectionPostProcess', [ 'i', 'i', 'f', 'f', 'i', 'f[]' ]); + register(31, 1, 'Gemm', [ 'f', 'f', 'i', 'i' ]); + register(32, 1, 'Generic', [ 'i', 'i', 'string' ]); + register(33, 1, 'Logistic', []); + register(34, 1, 'LSTM', [ 'f', 'f', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(35, 1, 'RNN', [ 'f', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(36, 1, 'TanH', []); + register(37, 1, 'Sigmoid', []); + register(38, 1, 'Squeeze', [ 'i', 'i', 'i', 'i' ]); + register(39, 1, 'FusedbnScaleRelu', []); + register(40, 1, 'Pad', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'f' ]); + register(41, 1, 'StridedSlice', [ 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(42, 1, 'ArgMax', [ 'i' ]); + register(43, 1, 'ArgMin', [ 'i' ]); + register(44, 1, 'TopKV2', [ 'i', 'i' ]); + register(45, 1, 'Reduction', [ 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(46, 1, 'Max', []); + register(47, 1, 'Min', []); + register(48, 1, 'GRU', [ 'f', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(49, 1, 'Addn', 'i'); + register(50, 1, 'SwapAxis', [ 'i', 'i' ]); + register(51, 1, 'Upsample', [ 'f' ]); + register(52, 1, 'SpaceToBatchND', [ 'i', 'i', 'i', 'i', 'i', 'i' ]); + register(53, 1, 'BatchToSpaceND', [ 'i', 'i', 'i', 'i', 'i', 'i' ]); + 
register(54, 1, 'Resize', [ 'f', 'f', 'i' ]); + register(55, 1, 'ShuffleChannel', [ 'i' ]); + register(56, 1, 'Crop', [ 'i', 'i', 'i', 'i', 'i', 'i', 'boolean', 'i', 'i' ]); + register(57, 1, 'ROIAlign', [ 'i', 'i', 'f' ]); + register(58, 1, 'Psroipooling', [ 'i', 'i', 'f', 'i' ]); + register(59, 1, 'Unary', [ 'i' ]); + register(60, 1, 'Expanddims', [ 'i' ]); + register(61, 1, 'Bias', [ 'i' ]); + register(62, 1, 'Noop', []); + register(63, 1, 'Threshold', [ 'f' ]); + register(64, 1, 'Hardsigmoid', [ 'f', 'f' ]); + register(65, 1, 'Embed', [ 'f', 'f', 'f', 'f' ]); + register(66, 1, 'InstanceNorm', [ 'f' ]); + register(67, 1, 'MVN', [ 'i', 'i', 'f' ]); + register(68, 1, 'Absval', []); + register(69, 1, 'Cast', [ 'i', 'i' ]); + register(70, 1, 'HardSwish', [ 'f', 'f' ]); + register(71, 1, 'Interp', [ 'i', 'i', 'f', 'f', 'i' ]); + register(72, 1, 'SELU', [ 'f', 'f' ]); + register(73, 1, 'ELU', [ 'f' ]); + register(74, 1, 'BroadMul', []); + register(75, 1, 'Logical', [ 'i' ]); + register(76, 1, 'Gather', [ 'i', 'i' ]); + register(77, 1, 'Transpose', [ 'i[]' ]); + register(78, 1, 'Comparison', [ 'i' ]); + register(79, 1, 'SpaceToDepth', [ 'i' ]); + register(80, 1, 'DepthToSpace', [ 'i' ]); + register(81, 1, 'Reverse', []); + register(82, 1, 'SparseToDense', [ 'i','i','i' ]); + register(83, 1, 'Ceil', []); + register(84, 1, 'SquaredDifference', []); + register(85, 1, 'Round', []); + register(86, 1, 'ZerosLike', []); + register(87, 1, 'Clip', [ 'f','f' ]); + register(88, 1, 'MatMul', []); + register(89, 1, 'ReduceL2', [ 'i','i' ]); + register(90, 1, 'Unsqueeze', [ 'i[]' ]); /* need fix*/ + register(91, 1, 'Num', []); + + const reader = new tengine.BinaryReader(buffer); + this._majorVersion = reader.uint16(); + this._minorVersion = reader.uint16(); + this._compileVersion = reader.uint16(); + reader.skip(2); // struct align + reader.seek(reader.uint32()); // root table + this._originalFormat = reader.int32(); + this._subFormat = reader.int32(); + this._graphs = []; + const 
subgraphOffsets = reader.uint32s(); + for (const subgraphOffset of subgraphOffsets) { + reader.seek(subgraphOffset); + + const subgraph = {}; + subgraph.id = reader.int32(); + subgraph.graphLayout = reader.int32(); + /* + if (graphLayout == 0) { + return "NCHW"; + } + if (graphLayout == 1) { + return "NHWC"; + } + */ + subgraph.originalLayout = reader.int32(); + subgraph.inputs = reader.uint32s(); + subgraph.outputs = reader.uint32s(); + const nodeOffsets = reader.uint32s(); + const tensorOffsets = reader.uint32s(); + const bufferOffsets = reader.uint32s(); + subgraph.name = reader.string(); + subgraph.nodes = []; + subgraph.tensors = []; + this._graphs.push(subgraph); + + // nodes + for (const nodeOffset of nodeOffsets) { + reader.seek(nodeOffset); + const node = {}; + node.id = reader.int32(); + node.inputs = reader.uint32s(); + node.outputs = reader.uint32s(); + const typeOffset = reader.int32(); + node.name = reader.string(); + const attributeOffsets = reader.uint32s(); + node.dynamicShape = reader.boolean() ? true : false; + + reader.seek(typeOffset); + node.version = reader.int32(); + const index = reader.int32(); + const paramsOffset = reader.uint32(); + + const type = index.toString() + ':' + node.version.toString(); + const schema = types.has(type) ? types.get(type) : null; + node.type = schema ? schema.name : index.toString(); + const paramTypes = schema ? 
schema.params : []; + + node.params = []; + if (paramsOffset) { + reader.seek(paramsOffset); + for (const paramType of paramTypes) { + if (paramType !== 'boolean') { + reader.align(4); + } + switch (paramType) { + case 'i': + node.params.push(reader.int32()); + break; + case 'f': + node.params.push(reader.float32()); + break; + case 'i[]': + node.params.push(reader.int32s()); + break; + case 'f[]': + node.params.push(reader.float32s()); + break; + case 'boolean': + node.params.push(reader.boolean()); + break; + case 'string': + node.params.push(reader.string()); + break; + case 'anchors': + node.params.push(reader.anchors(4)); + break; + default: + throw new tengine.Error("Unsupported param type '" + paramType + "' in '" + node.type + "'."); + } + } + } + + if (node.type === 'Slice') { + node.params[6] = (this._originalFormat == 5) ? node.params[6] : 0; + } + + node.attributes = []; + for (const attributeOffset of attributeOffsets) { + reader.seek(attributeOffset); + const name = reader.string(); + const value = reader.string(); + const type = reader.int32(); + node.attributes.push({ name: name, value: value, type: type }); + } + + if (node.type !== 'Const') { + subgraph.nodes.push(node); + } + } + + // buffers + const buffers = []; + for (const buffersOffset of bufferOffsets) { + reader.seek(buffersOffset); + const size = reader.uint32(); + reader.seek(reader.int32()); + buffers.push(reader.bytes(size)); + } + + // tensors + for (const tensorOffset of tensorOffsets) { + reader.seek(tensorOffset); + const tensor = {}; + tensor.id = reader.int32(); + tensor.buffer = buffers[reader.int32()]; + tensor.dims = reader.int32s(); + tensor.name = reader.string(); + const quantparamsOffset = reader.int32(); + tensor.layout = reader.int32(); + tensor.type = reader.int32(); // ar = 1, const = 2, input = 3, vdep, unknown + tensor.dataType = reader.int32(); + if (quantparamsOffset) { + reader.seek(quantparamsOffset); + tensor.quantparams = { + zeroPoint: reader.int32(), + scale: 
// Little-endian binary reader for the Tengine model file format.
//
// Variable-length data (integer/float arrays, strings, anchor boxes) is
// stored behind uint32 file offsets: the offset points at a uint32 count
// (strings: a size plus a second offset to the character data) followed by
// the payload. The plural helpers below follow such an offset, decode the
// payload, and restore the cursor so sequential reading can continue.
tengine.BinaryReader = class {

    constructor(buffer) {
        this._buffer = buffer;
        this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
        this._position = 0;
    }

    // Move the cursor to an absolute offset; past-the-end is an error.
    seek(position) {
        this._position = position;
        if (this._position > this._buffer.length) {
            throw new tengine.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.');
        }
    }

    // Advance the cursor by `offset` bytes; past-the-end is an error.
    skip(offset) {
        this._position += offset;
        if (this._position > this._buffer.length) {
            throw new tengine.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.');
        }
    }

    // Round the cursor up to the next multiple of `mod` (struct alignment).
    align(mod) {
        if (this._position % mod != 0) {
            this.skip(mod - (this._position % mod));
        }
    }

    // Return a copy of the next `length` bytes and advance past them.
    bytes(length) {
        const position = this._position;
        this.skip(length);
        return this._buffer.slice(position, this._position);
    }

    byte() {
        // Fixed: read at the pre-skip position, consistent with uint16()/
        // uint32()/int32()/float32() below. The original read *after*
        // skip(1), returning the byte following the value and throwing a
        // RangeError when the value was the last byte of the buffer.
        const position = this._position;
        this.skip(1);
        return this._dataView.getUint8(position);
    }

    boolean() {
        // NOTE(review): 0x00 decodes to true here, which looks inverted;
        // preserved as originally written — confirm against the Tengine
        // on-disk format before changing.
        return this.byte() == 0x00 ? true : false;
    }

    uint16() {
        const position = this._position;
        this.skip(2);
        return this._dataView.getUint16(position, true);
    }

    uint32() {
        const position = this._position;
        this.skip(4);
        return this._dataView.getUint32(position, true);
    }

    // Read a uint32 offset to a count-prefixed uint32 array; an offset of 0
    // means "no array". The cursor is restored afterwards.
    uint32s() {
        const values = [];
        const offset = this.uint32();
        if (offset) {
            const next = this._position;
            this.seek(offset);
            const count = this.uint32();
            for (let i = 0; i < count; i++) {
                values.push(this.uint32());
            }
            this.seek(next);
        }
        return values;
    }

    int32() {
        const position = this._position;
        this.skip(4);
        return this._dataView.getInt32(position, true);
    }

    // Same offset-table layout as uint32s(), with int32 elements.
    int32s() {
        const values = [];
        const offset = this.uint32();
        if (offset) {
            const next = this._position;
            this.seek(offset);
            const count = this.uint32();
            for (let i = 0; i < count; i++) {
                values.push(this.int32());
            }
            this.seek(next);
        }
        return values;
    }

    float32() {
        const position = this._position;
        this.skip(4);
        return this._dataView.getFloat32(position, true);
    }

    // Same offset-table layout as uint32s(), with float32 elements.
    float32s() {
        const values = [];
        const offset = this.uint32();
        if (offset) {
            const next = this._position;
            this.seek(offset);
            const count = this.uint32();
            for (let i = 0; i < count; i++) {
                values.push(this.float32());
            }
            this.seek(next);
        }
        return values;
    }

    // Read `count` anchor boxes of `length` float32 values each, stored
    // behind a uint32 offset (0 means none). Used by the RPN operator.
    anchors(length) {
        const arrays = [];
        const offset = this.uint32();
        if (offset) {
            const next = this._position;
            this.seek(offset);
            const count = this.uint32();
            for (let i = 0; i < count; i++) {
                const array = [];
                for (let j = 0; j < length; j++) {
                    array.push(this.float32());
                }
                arrays.push(array);
            }
            this.seek(next);
        }
        return arrays;
    }

    // Read a string stored behind a uint32 offset: at the offset, a uint32
    // size followed by a uint32 offset to the character data. The stored
    // size includes a trailing NUL, hence `size - 1` characters are read.
    string() {
        const position = this.uint32();
        let text = '';
        if (position) {
            const next = this._position;
            this.seek(position);
            const size = this.uint32();
            this.seek(this.uint32());
            for (let i = 0; i < size - 1; i++) {
                text += String.fromCharCode(this._buffer[this._position++]);
            }
            this.seek(next);
        }
        return text;
    }
};

// Error type thrown for malformed or truncated Tengine model files.
tengine.Error = class extends Error {

    constructor(message) {
        super(message);
        this.name = 'Error loading Tengine model.';
    }
};

if (typeof module !== 'undefined' && typeof module.exports === 'object') {
    module.exports.ModelFactory = tengine.ModelFactory;
}
For example, if x is an input element and y is\nan output element, this operation computes \\\\(y = |x|\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the absolute value of a tensor." + } + }, + { + "name": "AccumulateNV2", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Shape of elements of `inputs`.", + "name": "shape", + "type": "shape" + } + ], + "description": "`tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not\nwait for all of its inputs to be ready before beginning to sum. This can\nsave memory if inputs are ready at different times, since minimum temporary\nstorage is proportional to the output size rather than the inputs size.\n\nUnlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.\n\nReturns a `Tensor` of same shape and type as the elements of `inputs`.", + "inputs": [ + { + "description": "A list of `Tensor` objects, each with same shape and type.", + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "sum", + "typeAttr": "T" + } + ], + "summary": "Returns the element-wise sum of a list of tensors." + } + }, + { + "name": "AccumulatorApplyGradient", + "schema": { + "attributes": [ + { + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "Does not add if local_step is lesser than the accumulator's global_step.", + "inputs": [ + { + "description": "The handle to a accumulator.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "The local_step value at which the gradient was computed.", + "name": "local_step", + "type": 9 + }, + { + "description": "A tensor of the gradient to be accumulated.", + "name": "gradient", + "typeAttr": "dtype" + } + ], + "summary": "Applies a gradient to a given accumulator." + } + }, + { + "name": "AccumulatorNumAccumulated", + "schema": { + "inputs": [ + { + "description": "The handle to an accumulator.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "The number of gradients aggregated in the given accumulator.", + "name": "num_accumulated", + "type": 3 + } + ], + "summary": "Returns the number of gradients aggregated in the given accumulators." + } + }, + { + "name": "AccumulatorSetGlobalStep", + "schema": { + "description": "Logs warning if the accumulator's value is already higher than\nnew_global_step.", + "inputs": [ + { + "description": "The handle to an accumulator.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "The new global_step value to set.", + "name": "new_global_step", + "type": 9 + } + ], + "summary": "Updates the accumulator with a new value for global_step." + } + }, + { + "name": "AccumulatorTakeGradient", + "schema": { + "attributes": [ + { + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "The op blocks until sufficient (i.e., more than num_required)\ngradients have been accumulated. If the accumulator has already\naggregated more than num_required gradients, it returns the average of\nthe accumulated gradients. Also automatically increments the recorded\nglobal_step in the accumulator by 1, and resets the aggregate to 0.", + "inputs": [ + { + "description": "The handle to an accumulator.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "Number of gradients required before we return an aggregate.", + "name": "num_required", + "type": 3 + } + ], + "outputs": [ + { + "description": "The average of the accumulated gradients.", + "name": "average", + "typeAttr": "dtype" + } + ], + "summary": "Extracts the average gradient in the given ConditionalAccumulator." + } + }, + { + "name": "Acos", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes acos of x element-wise." + } + }, + { + "name": "Acosh", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Given an input tensor, the function computes inverse hyperbolic cosine of every element.\nInput range is `[1, inf]`. 
It returns `nan` if the input lies outside the range.\n\n```python\nx = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\ntf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes inverse hyperbolic cosine of x element-wise." + } + }, + { + "name": "Add", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `string`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x + y element-wise." 
+ } + }, + { + "name": "AddManySparseToTensorsMap", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "The container name for the `SparseTensorsMap` created by this op.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation's unique name is used.", + "name": "shared_name", + "type": "string" + } + ], + "description": "A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,\n`sparse_values`, and `sparse_shape`, where\n\n```sparse_indices.shape[1] == sparse_shape.shape[0] == R```\n\nAn `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`\nhaving a first `sparse_indices` column taking values between `[0, N)`, where\nthe minibatch size `N == sparse_shape[0]`.\n\nThe input `SparseTensor` must have rank `R` greater than 1, and the first\ndimension is treated as the minibatch dimension. Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension. The stored\n`SparseTensor` objects pointed to by each row of the output `sparse_handles`\nwill have rank `R-1`.\n\nThe `SparseTensor` values can then be read out as part of a minibatch by passing\nthe given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure\nthe correct `SparseTensorsMap` is accessed, ensure that the same\n`container` and `shared_name` are passed to that Op. If no `shared_name`\nis provided here, instead use the *name* of the Operation created by calling\n`AddManySparseToTensorsMap` as the `shared_name` passed to\n`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.", + "inputs": [ + { + "description": "2-D. The `indices` of the minibatch `SparseTensor`.\n`sparse_indices[:, 0]` must be ordered values in `[0, N)`.", + "name": "sparse_indices", + "type": 9 + }, + { + "description": "1-D. 
The `values` of the minibatch `SparseTensor`.", + "name": "sparse_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the minibatch `SparseTensor`.\nThe minibatch size `N == sparse_shape[0]`.", + "name": "sparse_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "1-D. The handles of the `SparseTensor` now stored in the\n`SparseTensorsMap`. Shape: `[N]`.", + "name": "sparse_handles", + "type": 9 + } + ], + "summary": "Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles." + } + }, + { + "name": "AddN", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `variant`.", + "name": "T", + "type": "type" + } + ], + "description": " Inputs must be of same size and shape.\n\n ```python\n x = [9, 7, 10]\n tf.math.add_n(x) ==> 26\n ```", + "inputs": [ + { + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "sum", + "typeAttr": "T" + } + ], + "summary": "Add all input tensors element wise." + } + }, + { + "name": "AddSparseToTensorsMap", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "The container name for the `SparseTensorsMap` created by this op.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "The shared name for the `SparseTensorsMap` created by this op.\nIf blank, the new Operation's unique name is used.", + "name": "shared_name", + "type": "string" + } + ], + "description": "A `SparseTensor` is represented by three tensors: `sparse_indices`,\n`sparse_values`, and `sparse_shape`.\n\nThis operator takes the given `SparseTensor` and adds it to a container\nobject (a `SparseTensorsMap`). 
A unique key within this container is generated\nin the form of an `int64`, and this is the value that is returned.\n\nThe `SparseTensor` can then be read out as part of a minibatch by passing\nthe key as a vector element to `TakeManySparseFromTensorsMap`. To ensure\nthe correct `SparseTensorsMap` is accessed, ensure that the same\n`container` and `shared_name` are passed to that Op. If no `shared_name`\nis provided here, instead use the *name* of the Operation created by calling\n`AddSparseToTensorsMap` as the `shared_name` passed to\n`TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.", + "inputs": [ + { + "description": "2-D. The `indices` of the `SparseTensor`.", + "name": "sparse_indices", + "type": 9 + }, + { + "description": "1-D. The `values` of the `SparseTensor`.", + "name": "sparse_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the `SparseTensor`.", + "name": "sparse_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "0-D. The handle of the `SparseTensor` now stored in the\n`SparseTensorsMap`.", + "name": "sparse_handle", + "type": 9 + } + ], + "summary": "Add a `SparseTensor` to a `SparseTensorsMap` return its handle." + } + }, + { + "name": "AddV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x + y element-wise." 
+ } + }, + { + "name": "AdjustContrast", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "images", + "typeAttr": "T" + }, + { + "name": "contrast_factor", + "type": 1 + }, + { + "name": "min_value", + "type": 1 + }, + { + "name": "max_value", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ], + "summary": "Deprecated. Disallowed in GraphDef version >= 2." + } + }, + { + "name": "AdjustContrastv2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "`images` is a tensor of at least 3 dimensions. The last 3 dimensions are\ninterpreted as `[height, width, channels]`. The other dimensions only\nrepresent a collection of images, such as `[batch, height, width, channels].`\n\nContrast is adjusted independently for each channel of each image.\n\nFor each channel, the Op first computes the mean of the image pixels in the\nchannel and then adjusts each component of each pixel to\n`(x - mean) * contrast_factor + mean`.", + "inputs": [ + { + "description": "Images to adjust. At least 3-D.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "A float multiplier for adjusting contrast.", + "name": "contrast_factor", + "type": 1 + } + ], + "outputs": [ + { + "description": "The contrast-adjusted image or images.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adjust the contrast of one or more images." 
+ } + }, + { + "name": "AdjustHue", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "`images` is a tensor of at least 3 dimensions. The last dimension is\ninterpreted as channels, and must be three.\n\nThe input image is considered in the RGB colorspace. Conceptually, the RGB\ncolors are first mapped into HSV. A delta is then applied all the hue values,\nand then remapped back to RGB colorspace.", + "inputs": [ + { + "description": "Images to adjust. At least 3-D.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "A float delta to add to the hue.", + "name": "delta", + "type": 1 + } + ], + "outputs": [ + { + "description": "The hue-adjusted image or images.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adjust the hue of one or more images." + } + }, + { + "name": "AdjustSaturation", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "`images` is a tensor of at least 3 dimensions. The last dimension is\ninterpreted as channels, and must be three.\n\nThe input image is considered in the RGB colorspace. Conceptually, the RGB\ncolors are first mapped into HSV. A scale is then applied all the saturation\nvalues, and then remapped back to RGB colorspace.", + "inputs": [ + { + "description": "Images to adjust. At least 3-D.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "A float scale to add to the saturation.", + "name": "scale", + "type": 1 + } + ], + "outputs": [ + { + "description": "The hue-adjusted image or images.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adjust the saturation of one or more images." 
+ } + }, + { + "name": "All", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "type": 10 + }, + { + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "type": 10 + } + ], + "summary": "Computes the \"logical and\" of elements across dimensions of a tensor." + } + }, + { + "name": "AllCandidateSampler", + "schema": { + "attributes": [ + { + "description": "Number of true labels per context.", + "minimum": 1, + "name": "num_true", + "type": "int64" + }, + { + "description": "Number of candidates to produce.", + "minimum": 1, + "name": "num_sampled", + "type": "int64" + }, + { + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities.", + "name": "unique", + "type": "boolean" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. 
Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "inputs": [ + { + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "name": "true_classes", + "type": 9 + } + ], + "outputs": [ + { + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "name": "sampled_candidates", + "type": 9 + }, + { + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "name": "true_expected_count", + "type": 1 + }, + { + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "name": "sampled_expected_count", + "type": 1 + } + ], + "summary": "Generates labels for candidate sampling with a learned unigram distribution." + } + }, + { + "name": "AllToAll", + "schema": { + "attributes": [ + { + "description": "The type of elements to be exchanged. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`.", + "name": "T", + "type": "type" + }, + { + "description": "The dimension number to concatenate.", + "name": "concat_dimension", + "type": "int64" + }, + { + "description": "The dimension number to split.", + "name": "split_dimension", + "type": "int64" + }, + { + "description": "The number of splits, this number must equal to the sub-group\nsize(group_assignment.get_shape()[1])", + "name": "split_count", + "type": "int64" + } + ], + "description": "On each replica, the input is split into `split_count` blocks along\n`split_dimension` and send to the other replicas given group_assignment. After\nreceiving `split_count` - 1 blocks from other replicas, we concatenate the\nblocks along `concat_dimension` as the output.\n\nFor example, suppose there are 2 TPU replicas:\nreplica 0 receives input: `[[A, B]]`\nreplica 1 receives input: `[[C, D]]`\n\ngroup_assignment=`[[0, 1]]`\nconcat_dimension=0\nsplit_dimension=1\nsplit_count=2\n\nreplica 0's output: `[[A], [C]]`\nreplica 1's output: `[[B], [D]]`", + "inputs": [ + { + "description": "The local input to the sum.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "An int32 tensor with shape\n[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the\nreplica ids in the ith subgroup.", + "name": "group_assignment", + "type": 3 + } + ], + "outputs": [ + { + "description": "The exchanged result.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "An Op to exchange data across TPU replicas." 
+ } + }, + { + "name": "Angle", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Tout", + "type": "type" + } + ], + "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the argument of each element in `input`. All elements in\n`input` must be complex numbers of the form \\\\(a + bj\\\\), where *a*\nis the real part and *b* is the imaginary part.\n\nThe argument returned by this operation is of the form \\\\(atan2(b, a)\\\\).\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.angle(input) ==> [2.0132, 1.056]\n```\n\n@compatibility(numpy)\nEquivalent to np.angle.\n@end_compatibility", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tout" + } + ], + "summary": "Returns the argument of a complex number." + } + }, + { + "name": "AnonymousIterator", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "outputs": [ + { + "description": "A handle to the iterator that can be passed to a \"MakeIterator\" or\n\"IteratorGetNext\" op. In contrast to Iterator, AnonymousIterator prevents\nresource sharing by name, and does not keep a reference to the resource\ncontainer.", + "name": "handle", + "type": 20 + } + ], + "summary": "A container for an iterator resource." 
+ } + }, + { + "name": "AnonymousIteratorV2", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "outputs": [ + { + "description": "A handle to the iterator that can be passed to a \"MakeIterator\" or\n\"IteratorGetNext\" op. In contrast to Iterator, AnonymousIterator prevents\nresource sharing by name, and does not keep a reference to the resource\ncontainer.", + "name": "handle", + "type": 20 + }, + { + "description": "A variant deleter that should be passed into the op that deletes the iterator.", + "name": "deleter", + "type": 21 + } + ], + "summary": "A container for an iterator resource." + } + }, + { + "name": "AnonymousMemoryCache", + "schema": { + "outputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + } + }, + { + "name": "AnonymousMultiDeviceIterator", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "devices", + "type": "string[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "outputs": [ + { + "description": "A handle to a multi device iterator that can be passed to a\n\"MultiDeviceIteratorGetNextFromShard\" op. In contrast to MultiDeviceIterator,\nAnonymousIterator prevents resource sharing by name, and does not keep a\nreference to the resource container.", + "name": "handle", + "type": 20 + }, + { + "description": "A variant deleter that should be passed into the op that deletes the iterator.", + "name": "deleter", + "type": 21 + } + ], + "summary": "A container for a multi device iterator resource." 
+ } + }, + { + "name": "AnonymousRandomSeedGenerator", + "schema": { + "inputs": [ + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + } + }, + { + "name": "AnonymousSeedGenerator", + "schema": { + "inputs": [ + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + }, + { + "name": "reshuffle", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + } + }, + { + "name": "Any", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "type": 10 + }, + { + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "type": 10 + } + ], + "summary": "Computes the \"logical or\" of elements across dimensions of a tensor." 
+ } + }, + { + "name": "ApplyAdaMax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nv_t <- max(beta2 * v_{t-1}, abs(g))\nvariable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "m", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "v", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta1_power", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta1", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta2", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the AdaMax algorithm." 
+ } + }, + { + "name": "ApplyAdadelta", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "accum = rho() * accum + (1 - rho()) * grad.square();\nupdate = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;\nupdate_accum = rho() * update_accum + (1 - rho()) * update.square();\nvar -= update;", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum_update", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay factor. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "description": "Constant factor. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the adadelta scheme." 
+ } + }, + { + "name": "ApplyAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the adagrad scheme." 
+ } + }, + { + "name": "ApplyAdagradDA", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "gradient_accumulator", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "gradient_squared_accumulator", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Training step number. Must be a scalar.", + "name": "global_step", + "type": 9 + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the proximal adagrad scheme." 
+ } + }, + { + "name": "ApplyAdagradV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Constant factor. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the adagrad scheme." 
+ } + }, + { + "name": "ApplyAdam", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, uses the nesterov update.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "$$lr_t := \\text{learning\\_rate} * \\sqrt{1 - beta_2^t} / (1 - beta_1^t)$$\n$$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$\n$$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$\n$$variable := variable - lr_t * m_t / (\\sqrt{v_t} + \\epsilon)$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "m", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "v", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta1_power", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta2_power", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta1", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta2", + "typeAttr": "T" + }, + { + "description": "Ridge term. 
Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the Adam algorithm." + } + }, + { + "name": "ApplyAddSign", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- (alpha + sign_decay * sign(g) *sign(m)) * g\nvariable <- variable - lr_t * update", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "m", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "sign_decay", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the AddSign update." 
+ } + }, + { + "name": "ApplyCenteredRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\n\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nmg <- rho * mg_{t-1} + (1-rho) * grad\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)\nvar <- var - mom", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "mg", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "ms", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "mom", + "typeAttr": "T" + }, + { + "description": "Scaling 
factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the centered RMSProp algorithm." + } + }, + { + "name": "ApplyFtrl", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "accum_new = accum + grad * grad\nlinear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "linear", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Scaling factor. 
Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the Ftrl-proximal scheme." + } + }, + { + "name": "ApplyFtrlV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "grad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad * grad\nlinear += grad_with_shrinkage -\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "linear", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + 
"description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 shrinkage regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the Ftrl-proximal scheme." + } + }, + { + "name": "ApplyGradientDescent", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "The change.", + "name": "delta", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' by subtracting 'alpha' * 'delta' from it." 
+ } + }, + { + "name": "ApplyMomentum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\naccum = accum * momentum + grad\nvar -= lr * accum", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Momentum. Must be a scalar.", + "name": "momentum", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the momentum scheme." 
+ } + }, + { + "name": "ApplyPowerSign", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g\nvariable <- variable - lr_t * update", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "m", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "logbase", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "sign_decay", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the AddSign update." 
+ } + }, + { + "name": "ApplyProximalAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "accum += grad * grad\nprox_v = var - lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' and '*accum' according to FOBOS with Adagrad learning rate." 
+ } + }, + { + "name": "ApplyProximalGradientDescent", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "prox_v = var - alpha * delta\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The change.", + "name": "delta", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' as FOBOS algorithm with fixed learning rate." 
+ } + }, + { + "name": "ApplyRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "ms", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "mom", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. 
Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the RMSProp algorithm." + } + }, + { + "name": "ApproximateEqual", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": 9.999999747378752e-06, + "name": "tolerance", + "type": "float32" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of abs(x-y) < tolerance element-wise." + } + }, + { + "name": "ArgMax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "output_type", + "type": "type" + } + ], + "description": "Note that in case of ties the identity of the return value is not guaranteed.\n\nUsage:\n ```python\n import tensorflow as tf\n a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n b = tf.math.argmax(input = a)\n c = tf.keras.backend.eval(b)\n # c = 4\n # here a[4] = 166.32 which is the largest element of a across axis 
0\n ```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "description": "int32 or int64, must be in the range `[-rank(input), rank(input))`.\nDescribes which dimension of the input Tensor to reduce across. For vectors,\nuse dimension = 0.", + "name": "dimension", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "output_type" + } + ], + "summary": "Returns the index with the largest value across dimensions of a tensor." + } + }, + { + "name": "ArgMin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "output_type", + "type": "type" + } + ], + "description": "Note that in case of ties the identity of the return value is not guaranteed.\n\nUsage:\n ```python\n import tensorflow as tf\n a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n b = tf.math.argmin(input = a)\n c = tf.keras.backend.eval(b)\n # c = 0\n # here a[0] = 1 which is the smallest element of a across axis 0\n ```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "description": "int32 or int64, must be in the range `[-rank(input), rank(input))`.\nDescribes which dimension of the input Tensor to reduce across. For vectors,\nuse dimension = 0.", + "name": "dimension", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "output_type" + } + ], + "summary": "Returns the index with the smallest value across dimensions of a tensor." 
+ } + }, + { + "name": "AsString", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `float32`, `float64`, `bool`.", + "name": "T", + "type": "type" + }, + { + "default": -1, + "description": "The post-decimal precision to use for floating point numbers.\nOnly used if precision > -1.", + "name": "precision", + "type": "int64" + }, + { + "default": false, + "description": "Use scientific notation for floating point numbers.", + "name": "scientific", + "type": "boolean" + }, + { + "default": false, + "description": "Use shortest representation (either scientific or standard) for\nfloating point numbers.", + "name": "shortest", + "type": "boolean" + }, + { + "default": -1, + "description": "Pad pre-decimal numbers to this width.\nApplies to both floating point and integer numbers.\nOnly used if width > -1.", + "name": "width", + "type": "int64" + }, + { + "default": "", + "description": "The value to pad if width > -1. If empty, pads with spaces.\nAnother typical value is '0'. String cannot be longer than 1 character.", + "name": "fill", + "type": "string" + } + ], + "description": "Supports many numeric types and boolean.\n\nFor Unicode, see the\n[https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode text)\ntutorial.\n\nExamples:\n\n>>> tf.strings.as_string([3, 2])\n\n>>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()\narray([b'3.14', b'2.72'], dtype=object)", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ], + "summary": "Converts each entry in the given tensor to strings." 
+ } + }, + { + "name": "Asin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that\nif `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`.\n\n**Note**: The output of `tf.math.asin` will lie within the invertible range\nof sine, i.e [-pi/2, pi/2].\n\nFor example:\n\n```python\n# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]\nx = tf.constant([1.047, 0.785])\ny = tf.math.sin(x) # [0.8659266, 0.7068252]\n\ntf.math.asin(y) # [1.047, 0.785] = x\n```\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the trignometric inverse sine of x element-wise." + } + }, + { + "name": "Asinh", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes inverse hyperbolic sine\n for every element in the tensor. Both input and output has a range of\n `[-inf, inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -2, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes inverse hyperbolic sine of x element-wise." 
+ } + }, + { + "name": "Assert", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "T", + "type": "type[]" + }, + { + "default": 3, + "description": "Print this many entries of each tensor.", + "name": "summarize", + "type": "int64" + } + ], + "description": "If `condition` evaluates to false, print the list of tensors in `data`.\n`summarize` determines how many entries of the tensors to print.", + "inputs": [ + { + "description": "The condition to evaluate.", + "name": "condition", + "type": 10 + }, + { + "description": "The tensors to print out when condition is false.", + "name": "data", + "typeListAttr": "T" + } + ], + "summary": "Asserts that the given condition is true." + } + }, + { + "name": "AssertCardinalityDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "cardinality", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "AssertNextDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "This transformation checks whether the camel-case names (i.e. \"FlatMap\", not\n\"flat_map\") of the transformations following this transformation match the list\nof names in the `transformations` argument. 
If there is a mismatch, the\ntransformation raises an exception.\n\nThe check occurs when iterating over the contents of the dataset, which\nmeans that the check happens *after* any static optimizations are applied\nto the dataset graph.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.\n`AssertNextDataset` passes through the outputs of its input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A `tf.string` vector `tf.Tensor` identifying the transformations that are\nexpected to happen next.", + "name": "transformations", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "A transformation that asserts which transformations happen next." + } + }, + { + "name": "Assign", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": true, + "description": "If true, the operation will validate that the shape\nof 'value' matches the shape of the Tensor being assigned to. If false,\n'ref' will take on the shape of 'value'.", + "name": "validate_shape", + "type": "boolean" + }, + { + "default": true, + "description": "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "category": "Control", + "description": "This operation outputs \"ref\" after the assignment is done.\nThis makes it easier to chain operations that need to use the reset value.", + "inputs": [ + { + "description": "Should be from a `Variable` node. May be uninitialized.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "The value to be assigned to the variable.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as \"ref\". 
Returned as a convenience for operations that want\nto use the new value after the variable has been reset.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Update 'ref' by assigning 'value' to it." + } + }, + { + "name": "AssignAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation outputs \"ref\" after the update is done.\nThis makes it easier to chain operations that need to use the reset value.", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "The value to be added to the variable.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as \"ref\". Returned as a convenience for operations that want\nto use the new value after the variable has been updated.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Update 'ref' by adding 'value' to it." 
+ } + }, + { + "name": "AssignAddVariableOp", + "schema": { + "attributes": [ + { + "description": "the dtype of the value.", + "name": "dtype", + "type": "type" + } + ], + "description": "Any ReadVariableOp with a control dependency on this op is guaranteed to\nsee the incremented value or a subsequent newer one.", + "inputs": [ + { + "description": "handle to the resource in which to store the variable.", + "name": "resource", + "type": 20 + }, + { + "description": "the value by which the variable will be incremented.", + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Adds a value to the current value of a variable." + } + }, + { + "name": "AssignSub", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation outputs \"ref\" after the update is done.\nThis makes it easier to chain operations that need to use the reset value.", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "The value to be subtracted to the variable.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as \"ref\". Returned as a convenience for operations that want\nto use the new value after the variable has been updated.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Update 'ref' by subtracting 'value' from it." 
+ } + }, + { + "name": "AssignSubVariableOp", + "schema": { + "attributes": [ + { + "description": "the dtype of the value.", + "name": "dtype", + "type": "type" + } + ], + "description": "Any ReadVariableOp with a control dependency on this op is guaranteed to\nsee the decremented value or a subsequent newer one.", + "inputs": [ + { + "description": "handle to the resource in which to store the variable.", + "name": "resource", + "type": 20 + }, + { + "description": "the value by which the variable will be incremented.", + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Subtracts a value from the current value of a variable." + } + }, + { + "name": "AssignVariableOp", + "schema": { + "attributes": [ + { + "description": "the dtype of the value.", + "name": "dtype", + "type": "type" + } + ], + "description": "Any ReadVariableOp with a control dependency on this op is guaranteed to return\nthis value or a subsequent newer value of the variable.", + "inputs": [ + { + "description": "handle to the resource in which to store the variable.", + "name": "resource", + "type": 20 + }, + { + "description": "the value to set the new tensor to use.", + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Assigns a new value to a variable." 
+ } + }, + { + "name": "Atan", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that\nif `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.\n\n**Note**: The output of `tf.math.atan` will lie within the invertible range\nof tan, i.e (-pi/2, pi/2).\n\nFor example:\n\n```python\n# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]\nx = tf.constant([1.047, 0.785])\ny = tf.math.tan(x) # [1.731261, 0.99920404]\n\ntf.math.atan(y) # [1.047, 0.785] = x\n```\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the trignometric inverse tangent of x element-wise." + } + }, + { + "name": "Atan2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "This is the angle \\( \\theta \\in [-\\pi, \\pi] \\) such that\n\\[ x = r \\cos(\\theta) \\]\nand\n\\[ y = r \\sin(\\theta) \\]\nwhere \\(r = \\sqrt(x^2 + y^2) \\).", + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes arctangent of `y/x` element-wise, respecting signs of the arguments." + } + }, + { + "name": "Atanh", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes inverse hyperbolic tangent\n for every element in the tensor. Input range is `[-1,1]` and output range is\n `[-inf, inf]`. 
If input is `-1`, output will be `-inf` and if the\n input is `1`, output will be `inf`. Values outside the range will have\n `nan` as output.\n\n ```python\n x = tf.constant([-float(\"inf\"), -1, -0.5, 1, 0, 0.5, 10, float(\"inf\")])\n tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes inverse hyperbolic tangent of x element-wise." + } + }, + { + "name": "AudioSpectrogram", + "schema": { + "attributes": [ + { + "description": "How wide the input window is in samples. For the highest efficiency\nthis should be a power of two, but other values are accepted.", + "name": "window_size", + "type": "int64" + }, + { + "description": "How widely apart the center of adjacent sample windows should be.", + "name": "stride", + "type": "int64" + }, + { + "default": false, + "description": "Whether to return the squared magnitude or just the\nmagnitude. Using squared magnitude can avoid extra calculations.", + "name": "magnitude_squared", + "type": "boolean" + } + ], + "description": "Spectrograms are a standard way of representing audio information as a series of\nslices of frequency information, one slice for each window of time. By joining\nthese together into a sequence, they form a distinctive fingerprint of the sound\nover time.\n\nThis op expects to receive audio data as an input, stored as floats in the range\n-1 to 1, together with a window width in samples, and a stride specifying how\nfar to move the window between slices. From this it generates a three\ndimensional output. The first dimension is for the channels in the input, so a\nstereo audio input would have two here for example. The second dimension is time,\nwith successive frequency slices. 
The third dimension has an amplitude value for\neach frequency during that time slice.\n\nThis means the layout when converted and saved as an image is rotated 90 degrees\nclockwise from a typical spectrogram. Time is descending down the Y axis, and\nthe frequency decreases from left to right.\n\nEach value in the result represents the square root of the sum of the real and\nimaginary parts of an FFT on the current window of samples. In this way, the\nlowest dimension represents the power of each frequency in the current window,\nand adjacent windows are concatenated in the next dimension.\n\nTo get a more intuitive and visual look at what this operation does, you can run\ntensorflow/examples/wav_to_spectrogram to read in an audio file and save out the\nresulting spectrogram as a PNG image.", + "inputs": [ + { + "description": "Float representation of audio data.", + "name": "input", + "type": 1 + } + ], + "outputs": [ + { + "description": "3D representation of the audio frequencies as an image.", + "name": "spectrogram", + "type": 1 + } + ], + "summary": "Produces a visualization of audio data over time." + } + }, + { + "name": "AudioSummary", + "schema": { + "attributes": [ + { + "description": "The sample rate of the signal in hertz.", + "name": "sample_rate", + "type": "float32" + }, + { + "default": 3, + "description": "Max number of batch elements to generate audio for.", + "minimum": 1, + "name": "max_outputs", + "type": "int64" + } + ], + "description": "The summary has up to `max_outputs` summary values containing audio. The\naudio is built from `tensor` which must be 3-D with shape `[batch_size,\nframes, channels]` or 2-D with shape `[batch_size, frames]`. The values are\nassumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\nThe `tag` argument is a scalar `Tensor` of type `string`. 
It is used to\nbuild the `tag` of the summary values:\n\n* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n* If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.", + "inputs": [ + { + "description": "Scalar. Used to build the `tag` attribute of the summary values.", + "name": "tag", + "type": 7 + }, + { + "description": "2-D of shape `[batch_size, frames]`.", + "name": "tensor", + "type": 1 + } + ], + "outputs": [ + { + "description": "Scalar. Serialized `Summary` protocol buffer.", + "name": "summary", + "type": 7 + } + ], + "summary": "Outputs a `Summary` protocol buffer with audio." + } + }, + { + "name": "AudioSummaryV2", + "schema": { + "attributes": [ + { + "default": 3, + "description": "Max number of batch elements to generate audio for.", + "minimum": 1, + "name": "max_outputs", + "type": "int64" + } + ], + "description": "The summary has up to `max_outputs` summary values containing audio. The\naudio is built from `tensor` which must be 3-D with shape `[batch_size,\nframes, channels]` or 2-D with shape `[batch_size, frames]`. The values are\nassumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\nThe `tag` argument is a scalar `Tensor` of type `string`. It is used to\nbuild the `tag` of the summary values:\n\n* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n* If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.", + "inputs": [ + { + "description": "Scalar. Used to build the `tag` attribute of the summary values.", + "name": "tag", + "type": 7 + }, + { + "description": "2-D of shape `[batch_size, frames]`.", + "name": "tensor", + "type": 1 + }, + { + "description": "The sample rate of the signal in hertz.", + "name": "sample_rate", + "type": 1 + } + ], + "outputs": [ + { + "description": "Scalar. 
Serialized `Summary` protocol buffer.", + "name": "summary", + "type": 7 + } + ], + "summary": "Outputs a `Summary` protocol buffer with audio." + } + }, + { + "name": "AutoShardDataset", + "schema": { + "attributes": [ + { + "default": 0, + "name": "auto_shard_policy", + "type": "int64" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Creates a dataset that shards the input dataset by num_workers, returning a\nsharded dataset for the index-th worker. This attempts to automatically shard\na dataset by examining the Dataset graph and inserting a shard op before the\ninputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).\n\nThis dataset will throw a NotFound error if we cannot shard the dataset\nautomatically.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of workers to distribute this dataset across.", + "name": "num_workers", + "type": 9 + }, + { + "description": "A scalar representing the index of the current worker out of num_workers.", + "name": "index", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that shards the input dataset." + } + }, + { + "name": "AvgPool", + "schema": { + "attributes": [ + { + "description": "The size of the sliding window for each dimension of `value`.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of `value`.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. 
Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "category": "Pool", + "description": "Each entry in `output` is the mean of the corresponding size `ksize`\nwindow in `value`.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The average pooled output tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Performs average pooling on the input." + } + }, + { + "name": "AvgPool3D", + "schema": { + "attributes": [ + { + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. 
With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "Shape `[batch, depth, rows, cols, channels]` tensor to pool over.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The average pooled output tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Performs 3D average pooling on the input." + } + }, + { + "name": "AvgPool3DGrad", + "schema": { + "attributes": [ + { + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. 
Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input dimensions.", + "name": "orig_input_shape", + "type": 3 + }, + { + "description": "Output backprop of shape `[batch, depth, rows, cols, channels]`.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The backprop for input.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients of average pooling function." + } + }, + { + "name": "AvgPoolGrad", + "schema": { + "attributes": [ + { + "description": "The size of the sliding window for each dimension of the input.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the input.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "1-D. Shape of the original input to `avg_pool`.", + "name": "orig_input_shape", + "type": 3 + }, + { + "description": "4-D with shape `[batch, height, width, channels]`. 
Gradients w.r.t.\nthe output of `avg_pool`.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D. Gradients w.r.t. the input of `avg_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients of the average pooling function." + } + }, + { + "name": "Barrier", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": [], + "description": "The shape of each component in a value. Each shape must be 1 in the\nfirst dimension. The length of this attr must be the same as the length of\ncomponent_types.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The capacity of the barrier. The default capacity is MAX_INT32,\nwhich is the largest capacity of the underlying queue.", + "name": "capacity", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this barrier is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this barrier will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "description": "A barrier represents a key-value map, where each key is a string, and\neach value is a tuple of tensors.\n\nAt runtime, the barrier contains 'complete' and 'incomplete'\nelements. A complete element has defined tensors for all components of\nits value tuple, and may be accessed using BarrierTakeMany. An\nincomplete element has some undefined components in its value tuple,\nand may be updated using BarrierInsertMany.", + "outputs": [ + { + "description": "The handle to the barrier.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "Defines a barrier that persists across different graph executions." 
+ } + }, + { + "name": "BarrierClose", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, all pending enqueue requests that are\nblocked on the barrier's queue will be canceled. InsertMany will fail, even\nif no new key is introduced.", + "name": "cancel_pending_enqueues", + "type": "boolean" + } + ], + "description": "This operation signals that no more new elements will be inserted in the\ngiven barrier. Subsequent InsertMany that try to introduce a new key will fail.\nSubsequent InsertMany operations that just add missing components to already\nexisting elements will continue to succeed. Subsequent TakeMany operations will\ncontinue to succeed if sufficient completed elements remain in the barrier.\nSubsequent TakeMany operations that would block will fail immediately.", + "inputs": [ + { + "description": "The handle to a barrier.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "Closes the given barrier." + } + }, + { + "name": "BarrierIncompleteSize", + "schema": { + "inputs": [ + { + "description": "The handle to a barrier.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "The number of incomplete elements (i.e. those with some of their value\ncomponents not set) in the barrier.", + "name": "size", + "type": 3 + } + ], + "summary": "Computes the number of incomplete elements in the given barrier." + } + }, + { + "name": "BarrierInsertMany", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "The component of the barrier elements that is being assigned.", + "name": "component_index", + "type": "int64" + } + ], + "description": "If a key is not found in the barrier, this operation will create a new\nincomplete element. 
If a key is found in the barrier, and the element\nalready has a value at component_index, this operation will fail with\nINVALID_ARGUMENT, and leave the barrier in an undefined state.", + "inputs": [ + { + "description": "The handle to a barrier.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "A one-dimensional tensor of keys, with length n.", + "name": "keys", + "type": 7 + }, + { + "description": "An any-dimensional tensor of values, which are associated with the\nrespective keys. The 0th dimension must have length n.", + "name": "values", + "typeAttr": "T" + } + ], + "summary": "For each key, assigns the respective value to the specified component." + } + }, + { + "name": "BarrierReadySize", + "schema": { + "inputs": [ + { + "description": "The handle to a barrier.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "The number of complete elements (i.e. those with all of their value\ncomponents set) in the barrier.", + "name": "size", + "type": 3 + } + ], + "summary": "Computes the number of complete elements in the given barrier." 
+ } + }, + { + "name": "BarrierTakeMany", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": false, + "description": "Allow to return less than num_elements items if barrier is\nalready closed.", + "name": "allow_small_batch", + "type": "boolean" + }, + { + "default": false, + "name": "wait_for_incomplete", + "type": "boolean" + }, + { + "default": -1, + "description": "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "This operation concatenates completed-element component tensors along\nthe 0th dimension to make a single component tensor.\n\nElements come out of the barrier when they are complete, and in the order\nin which they were placed into the barrier. The indices output provides\ninformation about the batch in which each element was originally inserted\ninto the barrier.", + "inputs": [ + { + "description": "The handle to a barrier.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "A single-element tensor containing the number of elements to\ntake.", + "name": "num_elements", + "type": 3 + } + ], + "outputs": [ + { + "description": "A one-dimensional tensor of indices, with length num_elems.\nThese indices refer to the batch in which the values were placed into the\nbarrier (starting with MIN_LONG and increasing with each BarrierInsertMany).", + "name": "indices", + "type": 9 + }, + { + "description": "A one-dimensional tensor of keys, with length num_elements.", + "name": "keys", + "type": 7 + }, + { + "description": "One any-dimensional tensor per component in a barrier element. 
All\nvalues have length num_elements in the 0th dimension.", + "name": "values", + "typeListAttr": "component_types" + } + ], + "summary": "Takes the given number of completed elements from a barrier." + } + }, + { + "name": "Batch", + "schema": { + "attributes": [ + { + "name": "num_batch_threads", + "type": "int64" + }, + { + "name": "max_batch_size", + "type": "int64" + }, + { + "default": 10, + "name": "max_enqueued_batches", + "type": "int64" + }, + { + "name": "batch_timeout_micros", + "type": "int64" + }, + { + "default": [], + "name": "allowed_batch_sizes", + "type": "int64[]" + }, + { + "name": "grad_timeout_micros", + "type": "int64" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + }, + { + "default": "", + "name": "batching_queue", + "type": "string" + }, + { + "minimum": 1, + "name": "T", + "type": "type[]" + } + ], + "description": "When many instances of this Op are being run concurrently with the same\ncontainer/shared_name in the same device, some will output zero-shaped Tensors\nand others will output Tensors of size up to max_batch_size.\n\nAll Tensors in in_tensors are batched together (so, for example, labels and\nfeatures should be batched with a single instance of this operation.\n\nEach invocation of batch emits an `id` scalar which will be used to identify\nthis particular invocation when doing unbatch or its gradient.\n\nEach op which emits a non-empty batch will also emit a non-empty batch_index\nTensor, which, is a [K, 3] matrix where each row contains the invocation's id,\nstart, and length of elements of each set of Tensors present in batched_tensors.\n\nBatched tensors are concatenated along the first dimension, and all tensors in\nin_tensors must have the first dimension of the same size.\n\nin_tensors: The tensors to be batched.\nnum_batch_threads: Number of scheduling threads for processing batches of work.\n Determines the number of batches 
processed in parallel.\nmax_batch_size: Batch sizes will never be bigger than this.\nbatch_timeout_micros: Maximum number of microseconds to wait before outputting\n an incomplete batch.\nallowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does\n nothing. Otherwise, supplies a list of batch sizes, causing the op to pad\n batches up to one of those sizes. The entries must increase monotonically, and\n the final entry must equal max_batch_size.\ngrad_timeout_micros: The timeout to use for the gradient. See Unbatch.\nbatched_tensors: Either empty tensors or a batch of concatenated Tensors.\nbatch_index: If out_tensors is non-empty, has information to invert it.\ncontainer: Controls the scope of sharing of this batch.\nid: always contains a scalar with a unique ID for this invocation of Batch.\nshared_name: Concurrently running instances of batch in the same device with the\n same container and shared_name will batch their elements together. If left\n empty, the op name will be used as the shared name.\nT: the types of tensors to be batched.", + "inputs": [ + { + "name": "in_tensors", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "batched_tensors", + "typeListAttr": "T" + }, + { + "name": "batch_index", + "type": 9 + }, + { + "name": "id", + "type": 9 + } + ], + "summary": "Batches all input tensors nondeterministically." 
+ } + }, + { + "name": "BatchCholesky", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchCholeskyGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "l", + "typeAttr": "T" + }, + { + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "name": "batch_size", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that batches `batch_size` elements from `input_dataset`." 
+ } + }, + { + "name": "BatchDatasetV2", + "schema": { + "attributes": [ + { + "default": false, + "name": "parallel_copy", + "type": "boolean" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements to accumulate in a batch.", + "name": "batch_size", + "type": 9 + }, + { + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "name": "drop_remainder", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that batches `batch_size` elements from `input_dataset`." + } + }, + { + "name": "BatchFFT", + "schema": { + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + } + }, + { + "name": "BatchFFT2D", + "schema": { + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + } + }, + { + "name": "BatchFFT3D", + "schema": { + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + } + }, + { + "name": "BatchFunction", + "schema": { + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "description": "Number of scheduling threads for processing batches of work.\nDetermines the number of batches processed in parallel.", + "name": "num_batch_threads", + "type": "int64" + }, + { + "description": "Batch sizes will never be bigger than this.", + "name": "max_batch_size", + "type": "int64" + }, + { + "description": "Maximum number of microseconds to wait before outputting\nan incomplete batch.", + "name": "batch_timeout_micros", + "type": "int64" + }, + { + "default": 10, + "description": "Maximum number of batches enqueued. 
Default: 10.", + "name": "max_enqueued_batches", + "type": "int64" + }, + { + "default": [], + "description": "Optional list of allowed batch sizes. If left empty, does\nnothing. Otherwise, supplies a list of batch sizes, causing the op to pad\nbatches up to one of those sizes. The entries must increase monotonically, and\nthe final entry must equal max_batch_size.", + "name": "allowed_batch_sizes", + "type": "int64[]" + }, + { + "default": "", + "description": "Controls the scope of sharing of this batch.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "Concurrently running instances of batch in the same device with the\nsame container and shared_name will batch their elements together. If left\nempty, the op name will be used as the shared name.", + "name": "shared_name", + "type": "string" + }, + { + "default": "", + "name": "batching_queue", + "type": "string" + }, + { + "description": "the types of tensors to be batched.", + "minimum": 1, + "name": "Tin", + "type": "type[]" + }, + { + "description": "the types of the captured tensors.", + "minimum": 0, + "name": "Tcaptured", + "type": "type[]" + }, + { + "description": "the types of the output tensors.", + "minimum": 1, + "name": "Tout", + "type": "type[]" + } + ], + "description": "So, for example, in the following code\n\n ```python\n\n # This input will be captured.\n y = tf.placeholder_with_default(1.0, shape=[])\n\n @tf.Defun(tf.float32)\n def computation(a):\n return tf.matmul(a, a) + y\n\n b = gen_batch_ops.batch_function(\n f=computation\n in_tensors=[a],\n captured_tensors=computation.captured_inputs,\n Tout=[o.type for o in computation.definition.signature.output_arg],\n num_batch_threads=1,\n max_batch_size=10,\n batch_timeout_micros=100000, # 100ms\n allowed_batch_sizes=[3, 10],\n batching_queue=\"\")\n\nIf more than one session.run call is simultaneously trying to compute `b`\nthe values of `a` will be gathered, non-deterministically concatenated\nalong the 
first axis, and only one thread will run the computation.\n\nAssumes that all arguments of the function are Tensors which will be batched\nalong their first dimension.\n\nArguments that are captured, are not batched. The session.run call which does\nthe concatenation, will use the values of the captured tensors available to it.\nTherefore, typical uses of captured tensors should involve values which remain\nunchanged across session.run calls. Inference is a good example of this.\n\nSparseTensor is not supported. The return value of the decorated function\nmust be a Tensor or a list/tuple of Tensors.", + "inputs": [ + { + "description": "The tensors to be batched.", + "name": "in_tensors", + "typeListAttr": "Tin" + }, + { + "description": "The tensors which are captured in the function, and don't need\nto be batched.", + "name": "captured_tensors", + "typeListAttr": "Tcaptured" + } + ], + "outputs": [ + { + "description": "The output tensors.", + "name": "out_tensors", + "typeListAttr": "Tout" + } + ], + "summary": "Batches all the inputs tensors to the computation done by the function." + } + }, + { + "name": "BatchIFFT", + "schema": { + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + } + }, + { + "name": "BatchIFFT2D", + "schema": { + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + } + }, + { + "name": "BatchIFFT3D", + "schema": { + "inputs": [ + { + "name": "input", + "type": 8 + } + ], + "outputs": [ + { + "name": "output", + "type": 8 + } + ] + } + }, + { + "name": "BatchMatMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, adjoint the slices of `x`. 
Defaults to `False`.", + "name": "adj_x", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, adjoint the slices of `y`. Defaults to `False`.", + "name": "adj_y", + "type": "boolean" + } + ], + "description": "Multiplies all slices of `Tensor` `x` and `y` (each slice can be\nviewed as an element of a batch), and arranges the individual results\nin a single output tensor of the same batch size. Each of the\nindividual slices can optionally be adjointed (to adjoint a matrix\nmeans to transpose and conjugate it) before multiplication by setting\nthe `adj_x` or `adj_y` flag to `True`, which are by default `False`.\n\nThe input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`\nand `[..., r_y, c_y]`.\n\nThe output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:\n\n r_o = c_x if adj_x else r_x\n c_o = r_y if adj_y else c_y\n\nIt is computed as:\n\n output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])", + "inputs": [ + { + "description": "2-D or higher with shape `[..., r_x, c_x]`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "2-D or higher with shape `[..., r_y, c_y]`.", + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "3-D or higher with shape `[..., r_o, c_o]`", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Multiplies slices of two tensors in batches." + } + }, + { + "name": "BatchMatMulV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, adjoint the slices of `x`. Defaults to `False`.", + "name": "adj_x", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, adjoint the slices of `y`. 
Defaults to `False`.", + "name": "adj_y", + "type": "boolean" + } + ], + "description": "Multiplies all slices of `Tensor` `x` and `y` (each slice can be\nviewed as an element of a batch), and arranges the individual results\nin a single output tensor of the same batch size. Each of the\nindividual slices can optionally be adjointed (to adjoint a matrix\nmeans to transpose and conjugate it) before multiplication by setting\nthe `adj_x` or `adj_y` flag to `True`, which are by default `False`.\n\nThe input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`\nand `[..., r_y, c_y]`.\n\nThe output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:\n\n r_o = c_x if adj_x else r_x\n c_o = r_y if adj_y else c_y\n\nIt is computed as:\n\n output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])\n\n*NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More\nabout broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n", + "inputs": [ + { + "description": "2-D or higher with shape `[..., r_x, c_x]`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "2-D or higher with shape `[..., r_y, c_y]`.", + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "3-D or higher with shape `[..., r_o, c_o]`", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Multiplies slices of two tensors in batches." 
+ } + }, + { + "name": "BatchMatrixBandPart", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "num_lower", + "type": 9 + }, + { + "name": "num_upper", + "type": 9 + } + ], + "outputs": [ + { + "name": "band", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixDeterminant", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixDiag", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "diagonal", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixDiagPart", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "diagonal", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixInverse", + "schema": { + "attributes": [ + { + "default": false, + "name": "adjoint", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixSetDiag", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "diagonal", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixSolve", + "schema": { + "attributes": [ + { + "default": false, + "name": 
"adjoint", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixSolveLs", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + }, + { + "default": true, + "name": "fast", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + }, + { + "name": "l2_regularizer", + "type": 2 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchMatrixTriangularSolve", + "schema": { + "attributes": [ + { + "default": true, + "name": "lower", + "type": "boolean" + }, + { + "default": false, + "name": "adjoint", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "matrix", + "typeAttr": "T" + }, + { + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchNormWithGlobalNormalization", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "A small float number to avoid dividing by 0.", + "name": "variance_epsilon", + "type": "float32" + }, + { + "description": "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma.", + "name": "scale_after_normalization", + "type": "boolean" + } + ], + "category": 
"Normalization", + "description": "This op is deprecated. Prefer `tf.nn.batch_normalization`.", + "inputs": [ + { + "description": "A 4D input Tensor.", + "name": "t", + "typeAttr": "T" + }, + { + "description": "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof.", + "name": "m", + "typeAttr": "T" + }, + { + "description": "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof.", + "name": "v", + "typeAttr": "T" + }, + { + "description": "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor.", + "name": "beta", + "typeAttr": "T" + }, + { + "description": "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor.", + "name": "gamma", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "result", + "typeAttr": "T" + } + ], + "summary": "Batch normalization." + } + }, + { + "name": "BatchNormWithGlobalNormalizationGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "A small float number to avoid dividing by 0.", + "name": "variance_epsilon", + "type": "float32" + }, + { + "description": "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma.", + "name": "scale_after_normalization", + "type": "boolean" + } + ], + "description": "This op is deprecated. 
See `tf.nn.batch_normalization`.", + "inputs": [ + { + "description": "A 4D input Tensor.", + "name": "t", + "typeAttr": "T" + }, + { + "description": "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof.", + "name": "m", + "typeAttr": "T" + }, + { + "description": "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof.", + "name": "v", + "typeAttr": "T" + }, + { + "description": "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this Tensor will be multiplied\nwith the normalized Tensor.", + "name": "gamma", + "typeAttr": "T" + }, + { + "description": "4D backprop Tensor.", + "name": "backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4D backprop tensor for input.", + "name": "dx", + "typeAttr": "T" + }, + { + "description": "1D backprop tensor for mean.", + "name": "dm", + "typeAttr": "T" + }, + { + "description": "1D backprop tensor for variance.", + "name": "dv", + "typeAttr": "T" + }, + { + "description": "1D backprop tensor for beta.", + "name": "db", + "typeAttr": "T" + }, + { + "description": "1D backprop tensor for gamma.", + "name": "dg", + "typeAttr": "T" + } + ], + "summary": "Gradients for batch normalization." 
+ } + }, + { + "name": "BatchSelfAdjointEig", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchSelfAdjointEigV2", + "schema": { + "attributes": [ + { + "default": true, + "name": "compute_v", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "e", + "typeAttr": "T" + }, + { + "name": "v", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchSvd", + "schema": { + "attributes": [ + { + "default": true, + "name": "compute_uv", + "type": "boolean" + }, + { + "default": false, + "name": "full_matrices", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "s", + "typeAttr": "T" + }, + { + "name": "u", + "typeAttr": "T" + }, + { + "name": "v", + "typeAttr": "T" + } + ] + } + }, + { + "name": "BatchToSpace", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "minimum": 2, + "name": "block_size", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "This is a legacy version of the more general BatchToSpaceND.\n\nRearranges (permutes) data from batch into blocks of spatial data, followed by\ncropping. This is the reverse transformation of SpaceToBatch. 
More specifically,\nthis op outputs a copy of the input tensor where values from the `batch`\ndimension are moved in spatial blocks to the `height` and `width` dimensions,\nfollowed by cropping along the `height` and `width` dimensions.", + "inputs": [ + { + "description": "4-D tensor with shape\n`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n depth]`. Note that the batch size of the input tensor must be divisible by\n`block_size * block_size`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\nhow many elements to crop from the intermediate result across the spatial\ndimensions as follows:\n\n crops = [[crop_top, crop_bottom], [crop_left, crop_right]]", + "name": "crops", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "4-D with shape `[batch, height, width, depth]`, where:\n\n height = height_pad - crop_top - crop_bottom\n width = width_pad - crop_left - crop_right\n\nThe attr `block_size` must be greater than one. 
It indicates the block size.\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\n(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1], [3]], [[5], [7]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "BatchToSpace for 4-D tensors of type T." 
+ } + }, + { + "name": "BatchToSpaceND", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tblock_shape", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tcrops", + "type": "type" + } + ], + "description": "This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of shape\n`block_shape + [batch]`, interleaves these blocks back into the grid defined by\nthe spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as\nthe input. The spatial dimensions of this intermediate result are then\noptionally cropped according to `crops` to produce the output. This is the\nreverse of SpaceToBatch. See below for a precise description.", + "inputs": [ + { + "description": "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has M dimensions.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "1-D with shape `[M]`, all values must be >= 1.", + "name": "block_shape", + "typeAttr": "Tblock_shape" + }, + { + "description": "2-D with shape `[M, 2]`, all values must be >= 0.\n `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input\n dimension `i + 1`, which corresponds to spatial dimension `i`. It is\n required that\n `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.\n\nThis operation is equivalent to the following steps:\n\n1. Reshape `input` to `reshaped` of shape:\n [block_shape[0], ..., block_shape[M-1],\n batch / prod(block_shape),\n input_shape[1], ..., input_shape[N-1]]\n\n2. 
Permute dimensions of `reshaped` to produce `permuted` of shape\n [batch / prod(block_shape),\n\n input_shape[1], block_shape[0],\n ...,\n input_shape[M], block_shape[M-1],\n\n input_shape[M+1], ..., input_shape[N-1]]\n\n3. Reshape `permuted` to produce `reshaped_permuted` of shape\n [batch / prod(block_shape),\n\n input_shape[1] * block_shape[0],\n ...,\n input_shape[M] * block_shape[M-1],\n\n input_shape[M+1],\n ...,\n input_shape[N-1]]\n\n4. Crop the start and end of dimensions `[1, ..., M]` of\n `reshaped_permuted` according to `crops` to produce the output of shape:\n [batch / prod(block_shape),\n\n input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],\n ...,\n input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],\n\n input_shape[M+1], ..., input_shape[N-1]]\n\nSome examples:\n\n(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\n(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\nThe output tensor has shape `[1, 2, 2, 3]` and value:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\nThe output tensor has shape `[1, 4, 4, 1]` and value:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\n(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and\n `crops = [[0, 0], [2, 0]]`:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], 
[2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nThe output tensor has shape `[2, 2, 4, 1]` and value:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```", + "name": "crops", + "typeAttr": "Tcrops" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "BatchToSpace for N-D tensors of type T." + } + }, + { + "name": "BesselI0e", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "Exponentially scaled modified Bessel function of order 0 defined as\n`bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.\n\nThis function is faster and numerically stabler than `bessel_i0(x)`.", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the Bessel i0e function of `x` element-wise." + } + }, + { + "name": "BesselI1e", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "Exponentially scaled modified Bessel function of order 0 defined as\n`bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.\n\nThis function is faster and numerically stabler than `bessel_i1(x)`.", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the Bessel i1e function of `x` element-wise." 
+ } + }, + { + "name": "Betainc", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "The regularized incomplete beta integral is defined as:\n\n\n\\\\(I_x(a, b) = \\frac{B(x; a, b)}{B(a, b)}\\\\)\n\nwhere\n\n\n\\\\(B(x; a, b) = \\int_0^x t^{a-1} (1 - t)^{b-1} dt\\\\)\n\n\nis the incomplete beta function and \\\\(B(a, b)\\\\) is the *complete*\nbeta function.", + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "b", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Compute the regularized incomplete beta integral \\\\(I_x(a, b)\\\\)." + } + }, + { + "name": "BiasAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n dimension. 
Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + } + ], + "category": "Layer", + "description": "This is a special case of `tf.add` where `bias` is restricted to be 1-D.\nBroadcasting is supported, so `value` may have any number of dimensions.", + "inputs": [ + { + "description": "Any number of dimensions.", + "name": "value", + "typeAttr": "T" + }, + { + "description": "1-D with size the last dimension of `value`.", + "name": "bias", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Broadcasted sum of `value` and `bias`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adds `bias` to `value`." + } + }, + { + "name": "BiasAddGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the bias tensor will be added to the last dimension\nof the value tensor.\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width].\nThe tensor will be added to \"in_channels\", the third-to-the-last\n dimension. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + } + ], + "description": "It accumulates all the values from out_backprop into the feature dimension.\nFor NHWC data format, the feature dimension is the last. 
For NCHW data format,\nthe feature dimension is the third-to-last.", + "inputs": [ + { + "description": "Any number of dimensions.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D with size the feature dimension of `out_backprop`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "The backward operation for \"BiasAdd\" on the \"bias\" tensor." + } + }, + { + "name": "BiasAddV1", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "This is a deprecated version of BiasAdd and will be soon removed.\n\nThis is a special case of `tf.add` where `bias` is restricted to be 1-D.\nBroadcasting is supported, so `value` may have any number of dimensions.", + "inputs": [ + { + "description": "Any number of dimensions.", + "name": "value", + "typeAttr": "T" + }, + { + "description": "1-D with size the last dimension of `value`.", + "name": "bias", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Broadcasted sum of `value` and `bias`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adds `bias` to `value`." + } + }, + { + "name": "Bincount", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "inputs": [ + { + "description": "int32 `Tensor`.", + "name": "arr", + "type": 3 + }, + { + "description": "non-negative int32 scalar `Tensor`.", + "name": "size", + "type": 3 + }, + { + "description": "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "name": "weights", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1D `Tensor` with length equal to `size`. The counts or summed weights for\neach value in the range [0, size).", + "name": "bins", + "typeAttr": "T" + } + ], + "summary": "Counts the number of occurrences of each value in an integer array." + } + }, + { + "name": "Bitcast", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.", + "name": "type", + "type": "type" + } + ], + "description": "Given a tensor `input`, this operation returns a tensor that has the same buffer\ndata as `input` with datatype `type`.\n\nIf the input datatype `T` is larger than the output datatype `type` then the\nshape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].\n\nIf `T` is smaller than `type`, the operator requires that the rightmost\ndimension be equal to sizeof(`type`)/sizeof(`T`). 
The shape then goes from\n[..., sizeof(`type`)/sizeof(`T`)] to [...].\n\ntf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype\n(e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast()\ngives module error.\nFor example,\n\nExample 1:\n\n>>> a = [1., 2., 3.]\n>>> equality_bitcast = tf.bitcast(a, tf.complex128)\nTraceback (most recent call last):\n...\nInvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]\n>>> equality_cast = tf.cast(a, tf.complex128)\n>>> print(equality_cast)\ntf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)\n\nExample 2:\n\n>>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)\n\n\nExample 3:\n\n>>> x = [1., 2., 3.]\n>>> y = [0., 2., 3.]\n>>> equality= tf.equal(x,y)\n>>> equality_cast = tf.cast(equality,tf.float32)\n>>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8)\n>>> print(equality)\ntf.Tensor([False True True], shape=(3,), dtype=bool)\n>>> print(equality_cast)\ntf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)\n>>> print(equality_bitcast)\ntf.Tensor(\n [[ 0 0 0 0]\n [ 0 0 128 63]\n [ 0 0 128 63]], shape=(3, 4), dtype=uint8)\n\n*NOTE*: Bitcast is implemented as a low-level cast, so machines with different\nendian orderings will give different results.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "type" + } + ], + "summary": "Bitcasts a tensor from one type to another without copying data." + } + }, + { + "name": "BitwiseAnd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The result will have those bits set, that are set in both `x` and `y`. 
The\ncomputation is performed on the underlying representations of `x` and `y`.\n\nFor example:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,\n tf.uint8, tf.uint16, tf.uint32, tf.uint64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([0, 5, 3, 14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)\n\n res = bitwise_ops.bitwise_and(lhs, rhs)\n tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE\n```\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Elementwise computes the bitwise AND of `x` and `y`." + } + }, + { + "name": "BitwiseOr", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The result will have those bits set, that are set in `x`, `y` or both. The\ncomputation is performed on the underlying representations of `x` and `y`.\n\nFor example:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,\n tf.uint8, tf.uint16, tf.uint32, tf.uint64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([0, 5, 3, 14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)\n\n res = bitwise_ops.bitwise_or(lhs, rhs)\n tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE\n```\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Elementwise computes the bitwise OR of `x` and `y`." 
+ } + }, + { + "name": "BitwiseXor", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The result will have those bits set, that are different in `x` and `y`. The\ncomputation is performed on the underlying representations of `x` and `y`.\n\nFor example:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,\n tf.uint8, tf.uint16, tf.uint32, tf.uint64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([0, 5, 3, 14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n exp = tf.constant([5, 5, 4, 5], dtype=tf.float32)\n\n res = bitwise_ops.bitwise_xor(lhs, rhs)\n tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE\n```\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Elementwise computes the bitwise XOR of `x` and `y`." 
+ } + }, + { + "name": "BlockLSTM", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "The forget gate bias.", + "name": "forget_bias", + "type": "float32" + }, + { + "default": 3.0, + "description": "Value to clip the 'cs' value to.", + "name": "cell_clip", + "type": "float32" + }, + { + "default": false, + "description": "Whether to use peephole weights.", + "name": "use_peephole", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "This is equivalent to applying LSTMBlockCell in a loop, like so:\n\n```python\nfor x1 in unpack(x):\n i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(\n x1, cs_prev, h_prev, w, wci, wcf, wco, b)\n cs_prev = cs1\n h_prev = h1\n i.append(i1)\n cs.append(cs1)\n f.append(f1)\n o.append(o1)\n ci.append(ci1)\n co.append(co1)\n h.append(h1)\nreturn pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)\n```", + "inputs": [ + { + "description": "Maximum time length actually used by this input. 
Outputs are padded\nwith zeros beyond this length.", + "name": "seq_len_max", + "type": 9 + }, + { + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "name": "x", + "typeAttr": "T" + }, + { + "description": "Value of the initial cell state.", + "name": "cs_prev", + "typeAttr": "T" + }, + { + "description": "Initial output of cell (to be used for peephole).", + "name": "h_prev", + "typeAttr": "T" + }, + { + "description": "The weight matrix.", + "name": "w", + "typeAttr": "T" + }, + { + "description": "The weight matrix for input gate peephole connection.", + "name": "wci", + "typeAttr": "T" + }, + { + "description": "The weight matrix for forget gate peephole connection.", + "name": "wcf", + "typeAttr": "T" + }, + { + "description": "The weight matrix for output gate peephole connection.", + "name": "wco", + "typeAttr": "T" + }, + { + "description": "The bias vector.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The input gate over the whole time sequence.", + "name": "i", + "typeAttr": "T" + }, + { + "description": "The cell state before the tanh over the whole time sequence.", + "name": "cs", + "typeAttr": "T" + }, + { + "description": "The forget gate over the whole time sequence.", + "name": "f", + "typeAttr": "T" + }, + { + "description": "The output gate over the whole time sequence.", + "name": "o", + "typeAttr": "T" + }, + { + "description": "The cell input over the whole time sequence.", + "name": "ci", + "typeAttr": "T" + }, + { + "description": "The cell after the tanh over the whole time sequence.", + "name": "co", + "typeAttr": "T" + }, + { + "description": "The output h vector over the whole time sequence.", + "name": "h", + "typeAttr": "T" + } + ], + "summary": "Computes the LSTM cell forward propagation for all the time steps." 
+ } + }, + { + "name": "BlockLSTMGrad", + "schema": { + "attributes": [ + { + "description": "Whether to use peephole weights.", + "name": "use_peephole", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "This implementation is to be used in conjunction of LSTMBlock.", + "inputs": [ + { + "description": "Maximum time length actually used by this input. Outputs are padded\nwith zeros beyond this length.", + "name": "seq_len_max", + "type": 9 + }, + { + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "name": "x", + "typeAttr": "T" + }, + { + "description": "Value of the initial cell state.", + "name": "cs_prev", + "typeAttr": "T" + }, + { + "description": "Initial output of cell (to be used for peephole).", + "name": "h_prev", + "typeAttr": "T" + }, + { + "description": "The weight matrix.", + "name": "w", + "typeAttr": "T" + }, + { + "description": "The weight matrix for input gate peephole connection.", + "name": "wci", + "typeAttr": "T" + }, + { + "description": "The weight matrix for forget gate peephole connection.", + "name": "wcf", + "typeAttr": "T" + }, + { + "description": "The weight matrix for output gate peephole connection.", + "name": "wco", + "typeAttr": "T" + }, + { + "description": "The bias vector.", + "name": "b", + "typeAttr": "T" + }, + { + "description": "The input gate over the whole time sequence.", + "name": "i", + "typeAttr": "T" + }, + { + "description": "The cell state before the tanh over the whole time sequence.", + "name": "cs", + "typeAttr": "T" + }, + { + "description": "The forget gate over the whole time sequence.", + "name": "f", + "typeAttr": "T" + }, + { + "description": "The output gate over the whole time sequence.", + "name": "o", + "typeAttr": "T" + }, + { + "description": "The cell input over the whole time sequence.", + "name": "ci", + "typeAttr": "T" + }, + { + 
"description": "The cell after the tanh over the whole time sequence.", + "name": "co", + "typeAttr": "T" + }, + { + "description": "The output h vector over the whole time sequence.", + "name": "h", + "typeAttr": "T" + }, + { + "description": "The current gradient of cs.", + "name": "cs_grad", + "typeAttr": "T" + }, + { + "description": "The gradient of h vector.", + "name": "h_grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradient of x to be back-propped.", + "name": "x_grad", + "typeAttr": "T" + }, + { + "description": "The gradient of cs_prev to be back-propped.", + "name": "cs_prev_grad", + "typeAttr": "T" + }, + { + "description": "The gradient of h_prev to be back-propped.", + "name": "h_prev_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for w to be back-propped.", + "name": "w_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wci to be back-propped.", + "name": "wci_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wcf to be back-propped.", + "name": "wcf_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wco to be back-propped.", + "name": "wco_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for w to be back-propped.", + "name": "b_grad", + "typeAttr": "T" + } + ], + "summary": "Computes the LSTM cell backward propagation for the entire time sequence." + } + }, + { + "name": "BlockLSTMGradV2", + "schema": { + "attributes": [ + { + "description": "Whether to use peephole weights.", + "name": "use_peephole", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "This implementation is to be used in conjunction of BlockLSTMV2.", + "inputs": [ + { + "description": "Maximum time length actually used by this input. 
Outputs are padded\nwith zeros beyond this length.", + "name": "seq_len_max", + "type": 9 + }, + { + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "name": "x", + "typeAttr": "T" + }, + { + "description": "Value of the initial cell state.", + "name": "cs_prev", + "typeAttr": "T" + }, + { + "description": "Initial output of cell (to be used for peephole).", + "name": "h_prev", + "typeAttr": "T" + }, + { + "description": "The weight matrix.", + "name": "w", + "typeAttr": "T" + }, + { + "description": "The weight matrix for input gate peephole connection.", + "name": "wci", + "typeAttr": "T" + }, + { + "description": "The weight matrix for forget gate peephole connection.", + "name": "wcf", + "typeAttr": "T" + }, + { + "description": "The weight matrix for output gate peephole connection.", + "name": "wco", + "typeAttr": "T" + }, + { + "description": "The bias vector.", + "name": "b", + "typeAttr": "T" + }, + { + "description": "The input gate over the whole time sequence.", + "name": "i", + "typeAttr": "T" + }, + { + "description": "The cell state before the tanh over the whole time sequence.", + "name": "cs", + "typeAttr": "T" + }, + { + "description": "The forget gate over the whole time sequence.", + "name": "f", + "typeAttr": "T" + }, + { + "description": "The output gate over the whole time sequence.", + "name": "o", + "typeAttr": "T" + }, + { + "description": "The cell input over the whole time sequence.", + "name": "ci", + "typeAttr": "T" + }, + { + "description": "The cell after the tanh over the whole time sequence.", + "name": "co", + "typeAttr": "T" + }, + { + "description": "The output h vector over the whole time sequence.", + "name": "h", + "typeAttr": "T" + }, + { + "description": "The current gradient of cs.", + "name": "cs_grad", + "typeAttr": "T" + }, + { + "description": "The gradient of h vector.", + "name": "h_grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradient of x to 
be back-propped.", + "name": "x_grad", + "typeAttr": "T" + }, + { + "description": "The gradient of cs_prev to be back-propped.", + "name": "cs_prev_grad", + "typeAttr": "T" + }, + { + "description": "The gradient of h_prev to be back-propped.", + "name": "h_prev_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for w to be back-propped.", + "name": "w_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wci to be back-propped.", + "name": "wci_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wcf to be back-propped.", + "name": "wcf_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wco to be back-propped.", + "name": "wco_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for w to be back-propped.", + "name": "b_grad", + "typeAttr": "T" + } + ], + "summary": "Computes the LSTM cell backward propagation for the entire time sequence." + } + }, + { + "name": "BlockLSTMV2", + "schema": { + "attributes": [ + { + "default": 0.0, + "description": "Value to clip the 'cs' value to.", + "name": "cell_clip", + "type": "float32" + }, + { + "default": false, + "description": "Whether to use peephole weights.", + "name": "use_peephole", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "This is equivalent to applying LSTMBlockCell in a loop, like so:\n\n```python\nfor x1 in unpack(x):\n i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(\n x1, cs_prev, h_prev, w, wci, wcf, wco, b)\n cs_prev = cs1\n h_prev = h1\n i.append(i1)\n cs.append(cs1)\n f.append(f1)\n o.append(o1)\n ci.append(ci1)\n co.append(co1)\n h.append(h1)\nreturn pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)\n\nNote that unlike LSTMBlockCell (and BlockLSTM) which uses ICFO gate layout,\nthis op uses IFCO. 
So in order for the following snippet to be equivalent\nall gate-related outputs should be reordered.\n```", + "inputs": [ + { + "description": "Maximum time length actually used by this input. Outputs are padded\nwith zeros beyond this length.", + "name": "seq_len_max", + "type": 9 + }, + { + "description": "The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).", + "name": "x", + "typeAttr": "T" + }, + { + "description": "Value of the initial cell state.", + "name": "cs_prev", + "typeAttr": "T" + }, + { + "description": "Initial output of cell (to be used for peephole).", + "name": "h_prev", + "typeAttr": "T" + }, + { + "description": "The weight matrix.", + "name": "w", + "typeAttr": "T" + }, + { + "description": "The weight matrix for input gate peephole connection.", + "name": "wci", + "typeAttr": "T" + }, + { + "description": "The weight matrix for forget gate peephole connection.", + "name": "wcf", + "typeAttr": "T" + }, + { + "description": "The weight matrix for output gate peephole connection.", + "name": "wco", + "typeAttr": "T" + }, + { + "description": "The bias vector.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The input gate over the whole time sequence.", + "name": "i", + "typeAttr": "T" + }, + { + "description": "The cell state before the tanh over the whole time sequence.", + "name": "cs", + "typeAttr": "T" + }, + { + "description": "The forget gate over the whole time sequence.", + "name": "f", + "typeAttr": "T" + }, + { + "description": "The output gate over the whole time sequence.", + "name": "o", + "typeAttr": "T" + }, + { + "description": "The cell input over the whole time sequence.", + "name": "ci", + "typeAttr": "T" + }, + { + "description": "The cell after the tanh over the whole time sequence.", + "name": "co", + "typeAttr": "T" + }, + { + "description": "The output h vector over the whole time sequence.", + "name": "h", + "typeAttr": "T" + } + ], + "summary": "Computes the LSTM 
cell forward propagation for all the time steps." + } + }, + { + "name": "BoostedTreesAggregateStats", + "schema": { + "attributes": [ + { + "description": "int; the maximum number of splits possible in the whole tree.", + "minimum": 1, + "name": "max_splits", + "type": "int64" + }, + { + "description": "int; equals to the maximum possible value of bucketized feature.", + "minimum": 1, + "name": "num_buckets", + "type": "int64" + } + ], + "description": "The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket.", + "inputs": [ + { + "description": "int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].", + "name": "node_ids", + "type": 3 + }, + { + "description": "float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.", + "name": "gradients", + "type": 1 + }, + { + "description": "float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.", + "name": "hessians", + "type": 1 + }, + { + "description": "int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).", + "name": "feature", + "type": 3 + } + ], + "outputs": [ + { + "description": "output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension])\ncontaining accumulated stats for each node, feature dimension and bucket.", + "name": "stats_summary", + "type": 1 + } + ], + "summary": "Aggregates the summary of accumulated stats for the batch." 
+ } + }, + { + "name": "BoostedTreesBucketize", + "schema": { + "attributes": [ + { + "description": "inferred int; number of features.", + "minimum": 0, + "name": "num_features", + "type": "int64" + } + ], + "description": "An op that returns a list of float tensors, where each tensor represents the\nbucketized values for a single feature.", + "inputs": [ + { + "description": "float; List of Rank 1 Tensor each containing float values for a single feature.", + "name": "float_values", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "float; List of Rank 1 Tensors each containing the bucket boundaries for a single\nfeature.", + "name": "bucket_boundaries", + "numberAttr": "num_features", + "type": 1 + } + ], + "outputs": [ + { + "description": "int; List of Rank 1 Tensors each containing the bucketized values for a single feature.", + "name": "buckets", + "numberAttr": "num_features", + "type": 3 + } + ], + "summary": "Bucketize each feature based on bucket boundaries." + } + }, + { + "name": "BoostedTreesCalculateBestFeatureSplit", + "schema": { + "attributes": [ + { + "description": "The dimension of logit, i.e., number of classes.", + "minimum": 1, + "name": "logits_dimension", + "type": "int64" + }, + { + "default": "inequality", + "description": "A string indicating if this Op should perform inequality split or equality split. Must be one of the following: `inequality`, `equality`.", + "name": "split_type", + "type": "string" + } + ], + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. 
Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.", + "inputs": [ + { + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "name": "node_id_range", + "type": 3 + }, + { + "description": "A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.\nThe first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.", + "name": "stats_summary", + "type": 1 + }, + { + "description": "l1 regularization factor on leaf weights, per instance based.", + "name": "l1", + "type": 1 + }, + { + "description": "l2 regularization factor on leaf weights, per instance based.", + "name": "l2", + "type": 1 + }, + { + "description": "adjustment to the gain, per leaf based.", + "name": "tree_complexity", + "type": 1 + }, + { + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "name": "min_node_weight", + "type": 1 + } + ], + "outputs": [ + { + "description": "A Rank 1 tensors indicating possible split node ids for each feature. 
The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.", + "name": "node_ids", + "type": 3 + }, + { + "description": "A Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.", + "name": "gains", + "type": 1 + }, + { + "description": "A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes.", + "name": "feature_dimensions", + "type": 3 + }, + { + "description": "A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.", + "name": "thresholds", + "type": 3 + }, + { + "description": "A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.", + "name": "left_node_contribs", + "type": 1 + }, + { + "description": "A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "name": "right_node_contribs", + "type": 1 + }, + { + "description": "A Rank 1 tensors indicating the which direction to go if data is missing. 
See above for details like shapes and sizes.\nInequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.", + "name": "split_with_default_directions", + "type": 7 + } + ], + "summary": "Calculates gains for each feature and returns the best possible split information for the feature." + } + }, + { + "name": "BoostedTreesCalculateBestFeatureSplitV2", + "schema": { + "attributes": [ + { + "description": "inferred from the size of `stats_summary_list`; the number of total features.", + "minimum": 1, + "name": "num_features", + "type": "int64" + }, + { + "description": "The dimension of logit, i.e., number of classes.", + "minimum": 1, + "name": "logits_dimension", + "type": "int64" + } + ], + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.", + "inputs": [ + { + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. 
The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "name": "node_id_range", + "type": 3 + }, + { + "description": "A list of Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.\nThe first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.", + "name": "stats_summaries_list", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature.", + "name": "split_types", + "type": 7 + }, + { + "description": "Rank 1 tensor with ids for each feature. This is the real id of the feature.", + "name": "candidate_feature_ids", + "type": 3 + }, + { + "description": "l1 regularization factor on leaf weights, per instance based.", + "name": "l1", + "type": 1 + }, + { + "description": "l2 regularization factor on leaf weights, per instance based.", + "name": "l2", + "type": 1 + }, + { + "description": "adjustment to the gain, per leaf based.", + "name": "tree_complexity", + "type": 1 + }, + { + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "name": "min_node_weight", + "type": 1 + } + ], + "outputs": [ + { + "description": "A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.", + "name": "node_ids", + "type": 3 + }, + { + "description": "A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. 
See above for details like shapes and sizes.", + "name": "gains", + "type": 1 + }, + { + "description": "A Rank 1 tensors indicating the best feature id for each node. See above for details like shapes and sizes.", + "name": "feature_ids", + "type": 3 + }, + { + "description": "A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes.", + "name": "feature_dimensions", + "type": 3 + }, + { + "description": "A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.", + "name": "thresholds", + "type": 3 + }, + { + "description": "A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.", + "name": "left_node_contribs", + "type": 1 + }, + { + "description": "A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "name": "right_node_contribs", + "type": 1 + }, + { + "description": "A Rank 1 tensors indicating the which direction to go if data is missing. See above for details like shapes and sizes.\nInequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.", + "name": "split_with_default_directions", + "type": 7 + } + ], + "summary": "Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node." 
+ } + }, + { + "name": "BoostedTreesCalculateBestGainsPerFeature", + "schema": { + "attributes": [ + { + "description": "the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.", + "minimum": 1, + "name": "max_splits", + "type": "int64" + }, + { + "description": "inferred from the size of `stats_summary_list`; the number of total features.", + "minimum": 1, + "name": "num_features", + "type": "int64" + } + ], + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe length of output lists are all of the same length, `num_features`.\nThe output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.", + "inputs": [ + { + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "name": "node_id_range", + "type": 3 + }, + { + "description": "A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. 
The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.", + "name": "stats_summary_list", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "l1 regularization factor on leaf weights, per instance based.", + "name": "l1", + "type": 1 + }, + { + "description": "l2 regularization factor on leaf weights, per instance based.", + "name": "l2", + "type": 1 + }, + { + "description": "adjustment to the gain, per leaf based.", + "name": "tree_complexity", + "type": 1 + }, + { + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "name": "min_node_weight", + "type": 1 + } + ], + "outputs": [ + { + "description": "An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.", + "name": "node_ids_list", + "numberAttr": "num_features", + "type": 3 + }, + { + "description": "An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.", + "name": "gains_list", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.", + "name": "thresholds_list", + "numberAttr": "num_features", + "type": 3 + }, + { + "description": "A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. 
Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.", + "name": "left_node_contribs_list", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "name": "right_node_contribs_list", + "numberAttr": "num_features", + "type": 1 + } + ], + "summary": "Calculates gains for each feature and returns the best possible split information for the feature." + } + }, + { + "name": "BoostedTreesCenterBias", + "schema": { + "inputs": [ + { + "description": "Handle to the tree ensemble.", + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "A tensor with shape=[logits_dimension] with mean of gradients for a first node.", + "name": "mean_gradients", + "type": 1 + }, + { + "description": "A tensor with shape=[logits_dimension] mean of hessians for a first node.", + "name": "mean_hessians", + "type": 1 + }, + { + "description": "l1 regularization factor on leaf weights, per instance based.", + "name": "l1", + "type": 1 + }, + { + "description": "l2 regularization factor on leaf weights, per instance based.", + "name": "l2", + "type": 1 + } + ], + "outputs": [ + { + "description": "Bool, whether to continue bias centering.", + "name": "continue_centering", + "type": 10 + } + ], + "summary": "Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering." 
+ } + }, + { + "name": "BoostedTreesCreateEnsemble", + "schema": { + "inputs": [ + { + "description": "Handle to the tree ensemble resource to be created.", + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "Token to use as the initial value of the resource stamp.", + "name": "stamp_token", + "type": 9 + }, + { + "description": "Serialized proto of the tree ensemble.", + "name": "tree_ensemble_serialized", + "type": 7 + } + ], + "summary": "Creates a tree ensemble model and returns a handle to it." + } + }, + { + "name": "BoostedTreesCreateQuantileStreamResource", + "schema": { + "attributes": [ + { + "default": 1099511627776, + "description": "int; The maximum number of data points that can be fed to the stream.", + "name": "max_elements", + "type": "int64" + } + ], + "inputs": [ + { + "description": "resource; Handle to quantile stream resource.", + "name": "quantile_stream_resource_handle", + "type": 20 + }, + { + "description": "float; The required approximation error of the stream resource.", + "name": "epsilon", + "type": 1 + }, + { + "description": "int; The number of streams managed by the resource that shares the same epsilon.", + "name": "num_streams", + "type": 9 + } + ], + "summary": "Create the Resource for Quantile Streams." 
+ } + }, + { + "name": "BoostedTreesDeserializeEnsemble", + "schema": { + "description": "ensemble.", + "inputs": [ + { + "description": "Handle to the tree ensemble.", + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "Token to use as the new value of the resource stamp.", + "name": "stamp_token", + "type": 9 + }, + { + "description": "Serialized proto of the ensemble.", + "name": "tree_ensemble_serialized", + "type": 7 + } + ], + "summary": "Deserializes a serialized tree ensemble config and replaces current tree" + } + }, + { + "name": "BoostedTreesEnsembleResourceHandleOp", + "schema": { + "attributes": [ + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "resource", + "type": 20 + } + ], + "summary": "Creates a handle to a BoostedTreesEnsembleResource" + } + }, + { + "name": "BoostedTreesExampleDebugOutputs", + "schema": { + "attributes": [ + { + "description": "Inferred.", + "minimum": 1, + "name": "num_bucketized_features", + "type": "int64" + }, + { + "description": "scalar, dimension of the logits, to be used for constructing the protos in\nexamples_debug_outputs_serialized.", + "name": "logits_dimension", + "type": "int64" + } + ], + "description": "It traverses all the trees and computes debug metrics for individual examples,\nsuch as getting split feature ids and logits after each split along the decision\npath used to compute directional feature contributions.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "A list of rank 1 Tensors containing bucket id for each\nfeature.", + "name": "bucketized_features", + "numberAttr": "num_bucketized_features", + "type": 3 + } + ], + "outputs": [ + { + "description": "Output rank 1 Tensor containing a proto serialized as a string for each example.", + "name": "examples_debug_outputs_serialized", + "type": 7 + } + ], + "summary": 
"Debugging/model interpretability outputs for each example." + } + }, + { + "name": "BoostedTreesFlushQuantileSummaries", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "num_features", + "type": "int64" + } + ], + "description": "An op that outputs a list of quantile summaries of a quantile stream resource.\nEach summary Tensor is rank 2, containing summaries (value, weight, min_rank,\nmax_rank) for a single feature.", + "inputs": [ + { + "description": "resource handle referring to a QuantileStreamResource.", + "name": "quantile_stream_resource_handle", + "type": 20 + } + ], + "outputs": [ + { + "name": "summaries", + "numberAttr": "num_features", + "type": 1 + } + ], + "summary": "Flush the quantile summaries from each quantile stream resource." + } + }, + { + "name": "BoostedTreesGetEnsembleStates", + "schema": { + "inputs": [ + { + "description": "Handle to the tree ensemble.", + "name": "tree_ensemble_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "Stamp token of the tree ensemble resource.", + "name": "stamp_token", + "type": 9 + }, + { + "description": "The number of trees in the tree ensemble resource.", + "name": "num_trees", + "type": 3 + }, + { + "description": "The number of trees that were finished successfully.", + "name": "num_finalized_trees", + "type": 3 + }, + { + "description": "The number of layers we attempted to build (but not necessarily succeeded).", + "name": "num_attempted_layers", + "type": 3 + }, + { + "description": "Rank size 2 tensor that contains start and end ids of the nodes in the latest\nlayer.", + "name": "last_layer_nodes_range", + "type": 3 + } + ], + "summary": "Retrieves the tree ensemble resource stamp token, number of trees and growing statistics." 
+ } + }, + { + "name": "BoostedTreesMakeQuantileSummaries", + "schema": { + "attributes": [ + { + "description": "int; Inferred from the size of float_values.\nThe number of float features.", + "minimum": 0, + "name": "num_features", + "type": "int64" + } + ], + "description": "An op that takes a list of tensors (one tensor per feature) and outputs the\nquantile summaries for each tensor.", + "inputs": [ + { + "description": "float; List of Rank 1 Tensors each containing values for a single feature.", + "name": "float_values", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "float; Rank 1 Tensor with weights per instance.", + "name": "example_weights", + "type": 1 + }, + { + "description": "float; The required maximum approximation error.", + "name": "epsilon", + "type": 1 + } + ], + "outputs": [ + { + "description": "float; List of Rank 2 Tensors each containing the quantile summary\n(value, weight, min_rank, max_rank) of a single feature.", + "name": "summaries", + "numberAttr": "num_features", + "type": 1 + } + ], + "summary": "Makes the summary of quantiles for the batch." 
+ } + }, + { + "name": "BoostedTreesMakeStatsSummary", + "schema": { + "attributes": [ + { + "description": "int; the maximum number of splits possible in the whole tree.", + "minimum": 1, + "name": "max_splits", + "type": "int64" + }, + { + "description": "int; equals to the maximum possible value of bucketized feature.", + "minimum": 1, + "name": "num_buckets", + "type": "int64" + }, + { + "description": "int; inferred from the size of bucketized_features_list; the number of features.", + "minimum": 1, + "name": "num_features", + "type": "int64" + } + ], + "description": "The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.", + "inputs": [ + { + "description": "int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer.", + "name": "node_ids", + "type": 3 + }, + { + "description": "float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.", + "name": "gradients", + "type": 1 + }, + { + "description": "float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.", + "name": "hessians", + "type": 1 + }, + { + "description": "int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).", + "name": "bucketized_features_list", + "numberAttr": "num_features", + "type": 3 + } + ], + "outputs": [ + { + "description": "output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians.", + "name": "stats_summary", + "type": 1 + } + ], + "summary": "Makes the summary of accumulated stats for the batch." 
+ } + }, + { + "name": "BoostedTreesPredict", + "schema": { + "attributes": [ + { + "description": "Inferred.", + "minimum": 1, + "name": "num_bucketized_features", + "type": "int64" + }, + { + "description": "scalar, dimension of the logits, to be used for partial logits\nshape.", + "name": "logits_dimension", + "type": "int64" + } + ], + "description": "computes the logits. It is designed to be used during prediction.\nIt traverses all the trees and calculates the final score for each instance.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "A list of rank 1 Tensors containing bucket id for each\nfeature.", + "name": "bucketized_features", + "numberAttr": "num_bucketized_features", + "type": 3 + } + ], + "outputs": [ + { + "description": "Output rank 2 Tensor containing logits for each example.", + "name": "logits", + "type": 1 + } + ], + "summary": "Runs multiple additive regression ensemble predictors on input instances and" + } + }, + { + "name": "BoostedTreesQuantileStreamResourceAddSummaries", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "num_features", + "type": "int64" + } + ], + "description": "An op that adds a list of quantile summaries to a quantile stream resource. Each\nsummary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank)\nfor a single feature.", + "inputs": [ + { + "description": "resource handle referring to a QuantileStreamResource.", + "name": "quantile_stream_resource_handle", + "type": 20 + }, + { + "description": "string; List of Rank 2 Tensor each containing the summaries for a single feature.", + "name": "summaries", + "numberAttr": "num_features", + "type": 1 + } + ], + "summary": "Add the quantile summaries to each quantile stream resource." 
+ } + }, + { + "name": "BoostedTreesQuantileStreamResourceDeserialize", + "schema": { + "attributes": [ + { + "description": "inferred int; number of features to get bucket boundaries for.", + "minimum": 1, + "name": "num_streams", + "type": "int64" + } + ], + "description": "An op that deserializes bucket boundaries and are boundaries ready flag into current QuantileAccumulator.", + "inputs": [ + { + "description": "resource handle referring to a QuantileStreamResource.", + "name": "quantile_stream_resource_handle", + "type": 20 + }, + { + "description": "float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.", + "name": "bucket_boundaries", + "numberAttr": "num_streams", + "type": 1 + } + ], + "summary": "Deserialize bucket boundaries and ready flag into current QuantileAccumulator." + } + }, + { + "name": "BoostedTreesQuantileStreamResourceFlush", + "schema": { + "attributes": [ + { + "default": false, + "description": "bool; If True, the output will be the num_quantiles for each stream where the ith\nentry is the ith quantile of the input with an approximation error of epsilon.\nDuplicate values may be present.\nIf False, the output will be the points in the histogram that we got which roughly\ntranslates to 1/epsilon boundaries and without any duplicates.\nDefault to False.", + "name": "generate_quantiles", + "type": "boolean" + } + ], + "description": "An op that flushes the summaries for a quantile stream resource.", + "inputs": [ + { + "description": "resource handle referring to a QuantileStreamResource.", + "name": "quantile_stream_resource_handle", + "type": 20 + }, + { + "description": "int; approximate number of buckets unless using generate_quantiles.", + "name": "num_buckets", + "type": 9 + } + ], + "summary": "Flush the summaries for a quantile stream resource." 
+ } + }, + { + "name": "BoostedTreesQuantileStreamResourceGetBucketBoundaries", + "schema": { + "attributes": [ + { + "description": "inferred int; number of features to get bucket boundaries for.", + "minimum": 0, + "name": "num_features", + "type": "int64" + } + ], + "description": "An op that returns a list of float tensors for a quantile stream resource. Each\ntensor is Rank 1 containing bucket boundaries for a single feature.", + "inputs": [ + { + "description": "resource handle referring to a QuantileStreamResource.", + "name": "quantile_stream_resource_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.", + "name": "bucket_boundaries", + "numberAttr": "num_features", + "type": 1 + } + ], + "summary": "Generate the bucket boundaries for each feature based on accumulated summaries." + } + }, + { + "name": "BoostedTreesQuantileStreamResourceHandleOp", + "schema": { + "attributes": [ + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "resource", + "type": 20 + } + ], + "summary": "Creates a handle to a BoostedTreesQuantileStreamResource." + } + }, + { + "name": "BoostedTreesSerializeEnsemble", + "schema": { + "inputs": [ + { + "description": "Handle to the tree ensemble.", + "name": "tree_ensemble_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "Stamp token of the tree ensemble resource.", + "name": "stamp_token", + "type": 9 + }, + { + "description": "Serialized proto of the ensemble.", + "name": "tree_ensemble_serialized", + "type": 7 + } + ], + "summary": "Serializes the tree ensemble to a proto." 
+ } + }, + { + "name": "BoostedTreesSparseAggregateStats", + "schema": { + "attributes": [ + { + "description": "int; the maximum number of splits possible in the whole tree.", + "minimum": 1, + "name": "max_splits", + "type": "int64" + }, + { + "description": "int; equals to the maximum possible value of bucketized feature + 1.", + "minimum": 1, + "name": "num_buckets", + "type": "int64" + } + ], + "description": "The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id.", + "inputs": [ + { + "description": "int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].", + "name": "node_ids", + "type": 3 + }, + { + "description": "float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.", + "name": "gradients", + "type": 1 + }, + { + "description": "float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.", + "name": "hessians", + "type": 1 + }, + { + "description": "int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]).\nNumber of sparse entries across all instances from the batch. The first value is\nthe index of the instance, the second is dimension of the feature. The second axis\ncan only have 2 values, i.e., the input dense version of Tensor can only be matrix.", + "name": "feature_indices", + "type": 3 + }, + { + "description": "int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]).\nNumber of sparse entries across all instances from the batch. 
The first value is\nthe index of the instance, the second is dimension of the feature.", + "name": "feature_values", + "type": 3 + }, + { + "description": "int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]).\nThe first axis can only have 2 values, [batch_size, feature_dimension].", + "name": "feature_shape", + "type": 3 + } + ], + "outputs": [ + { + "description": "int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4])\nThe second axis can only be 4 including node id, feature dimension, bucket id, and statistics_dimension.\nstatistics_dimension = logits_dimension + hessian_dimension.", + "name": "stats_summary_indices", + "type": 3 + }, + { + "description": "output Rank 1 Tensor (shape=[number of non zero statistics])", + "name": "stats_summary_values", + "type": 1 + }, + { + "description": "output Rank 1 Tensor (shape=[4])\nThe tensor has following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension],\nwhere statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension\nis the same as label_dimension, i.e., the output space. hessian_dimension can be the same\nas logits dimension when diagonal hessian is used, or label_dimension^2 when full\nhessian is used.", + "name": "stats_summary_shape", + "type": 3 + } + ], + "summary": "Aggregates the summary of accumulated stats for the batch." + } + }, + { + "name": "BoostedTreesSparseCalculateBestFeatureSplit", + "schema": { + "attributes": [ + { + "description": "The dimension of logit, i.e., number of classes.", + "minimum": 1, + "name": "logits_dimension", + "type": "int64" + }, + { + "default": "inequality", + "description": "A string indicating if this Op should perform inequality split or equality split. 
Must be one of the following: `inequality`.", + "name": "split_type", + "type": "string" + } + ], + "description": "The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.\n\nIt is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.\n\nIn this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).\n\nThe output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.", + "inputs": [ + { + "description": "A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).", + "name": "node_id_range", + "type": 3 + }, + { + "description": "A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. 
The second dimension contains node id, feature dimension, bucket id, and stats dim.\nstats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used.", + "name": "stats_summary_indices", + "type": 3 + }, + { + "description": "A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.", + "name": "stats_summary_values", + "type": 1 + }, + { + "description": "A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].", + "name": "stats_summary_shape", + "type": 3 + }, + { + "description": "l1 regularization factor on leaf weights, per instance based.", + "name": "l1", + "type": 1 + }, + { + "description": "l2 regularization factor on leaf weights, per instance based.", + "name": "l2", + "type": 1 + }, + { + "description": "adjustment to the gain, per leaf based.", + "name": "tree_complexity", + "type": 1 + }, + { + "description": "minimum avg of hessians in a node before required for the node to be considered for splitting.", + "name": "min_node_weight", + "type": 1 + } + ], + "outputs": [ + { + "description": "A Rank 1 tensor indicating possible node ids that can be split.", + "name": "node_ids", + "type": 3 + }, + { + "description": "A Rank 1 tensor indicating the best gains to split each node.", + "name": "gains", + "type": 1 + }, + { + "description": "A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.", + "name": "feature_dimensions", + "type": 3 + }, + { + "description": "A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.", + "name": "thresholds", + "type": 3 + }, + { + "description": "A Rank 2 tensor indicating the contribution of the left nodes when 
branching from parent nodes to the left direction by the given threshold for each feature.\nThis value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.", + "name": "left_node_contribs", + "type": 1 + }, + { + "description": "A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.", + "name": "right_node_contribs", + "type": 1 + }, + { + "description": "A Rank 1 tensor indicating which direction to go if data is missing.\nInequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.", + "name": "split_with_default_directions", + "type": 7 + } + ], + "summary": "Calculates gains for each feature and returns the best possible split information for the feature." + } + }, + { + "name": "BoostedTreesTrainingPredict", + "schema": { + "attributes": [ + { + "description": "Inferred.", + "minimum": 1, + "name": "num_bucketized_features", + "type": "int64" + }, + { + "description": "scalar, dimension of the logits, to be used for partial logits\nshape.", + "name": "logits_dimension", + "type": "int64" + } + ], + "description": "computes the update to cached logits. 
It is designed to be used during training.\nIt traverses the trees starting from cached tree id and cached node id and\ncalculates the updates to be pushed to the cache.", + "inputs": [ + { + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "Rank 1 Tensor containing cached tree ids which is the starting\ntree of prediction.", + "name": "cached_tree_ids", + "type": 3 + }, + { + "description": "Rank 1 Tensor containing cached node id which is the starting\nnode of prediction.", + "name": "cached_node_ids", + "type": 3 + }, + { + "description": "A list of rank 1 Tensors containing bucket id for each\nfeature.", + "name": "bucketized_features", + "numberAttr": "num_bucketized_features", + "type": 3 + } + ], + "outputs": [ + { + "description": "Rank 2 Tensor containing logits update (with respect to cached\nvalues stored) for each example.", + "name": "partial_logits", + "type": 1 + }, + { + "description": "Rank 1 Tensor containing new tree ids for each example.", + "name": "tree_ids", + "type": 3 + }, + { + "description": "Rank 1 Tensor containing new node ids in the new tree_ids.", + "name": "node_ids", + "type": 3 + } + ], + "summary": "Runs multiple additive regression ensemble predictors on input instances and" + } + }, + { + "name": "BoostedTreesUpdateEnsemble", + "schema": { + "attributes": [ + { + "description": "0-No pruning, 1-Pre-pruning, 2-Post-pruning.", + "minimum": 0, + "name": "pruning_mode", + "type": "int64" + }, + { + "description": "Number of features that have best splits returned. INFERRED.", + "minimum": 0, + "name": "num_features", + "type": "int64" + } + ], + "description": "or by starting a new tree.", + "inputs": [ + { + "description": "Handle to the ensemble variable.", + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "Rank 1 tensor with ids for each feature. 
This is the real id of\nthe feature that will be used in the split.", + "name": "feature_ids", + "type": 3 + }, + { + "description": "List of rank 1 tensors representing the nodes for which this feature\nhas a split.", + "name": "node_ids", + "numberAttr": "num_features", + "type": 3 + }, + { + "description": "List of rank 1 tensors representing the gains for each of the feature's\nsplit.", + "name": "gains", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "List of rank 1 tensors representing the thesholds for each of the\nfeature's split.", + "name": "thresholds", + "numberAttr": "num_features", + "type": 3 + }, + { + "description": "List of rank 2 tensors with left leaf contribs for each of\nthe feature's splits. Will be added to the previous node values to constitute\nthe values of the left nodes.", + "name": "left_node_contribs", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "List of rank 2 tensors with right leaf contribs for each\nof the feature's splits. Will be added to the previous node values to constitute\nthe values of the right nodes.", + "name": "right_node_contribs", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "Max depth of the tree to build.", + "name": "max_depth", + "type": 3 + }, + { + "description": "shrinkage const for each new tree.", + "name": "learning_rate", + "type": 1 + } + ], + "summary": "Updates the tree ensemble by either adding a layer to the last tree being grown" + } + }, + { + "name": "BoostedTreesUpdateEnsembleV2", + "schema": { + "attributes": [ + { + "description": "Number of features that have best splits returned. 
INFERRED.", + "minimum": 0, + "name": "num_features", + "type": "int64" + }, + { + "default": 1, + "description": "scalar, dimension of the logits", + "name": "logits_dimension", + "type": "int64" + }, + { + "default": 1, + "description": "Number of groups of split information to process, where a group contains feature\nids that are processed together in BoostedTreesCalculateBestFeatureSplitOpV2.\nINFERRED.", + "minimum": 1, + "name": "num_groups", + "type": "int64" + } + ], + "description": "or by starting a new tree.", + "inputs": [ + { + "description": "Handle to the ensemble variable.", + "name": "tree_ensemble_handle", + "type": 20 + }, + { + "description": "Rank 1 tensor with ids for each feature. This is the real id of\nthe feature that will be used in the split.", + "name": "feature_ids", + "numberAttr": "num_groups", + "type": 3 + }, + { + "description": "List of rank 1 tensors representing the dimension in each feature.", + "name": "dimension_ids", + "numberAttr": "num_features", + "type": 3 + }, + { + "description": "List of rank 1 tensors representing the nodes for which this feature\nhas a split.", + "name": "node_ids", + "numberAttr": "num_features", + "type": 3 + }, + { + "description": "List of rank 1 tensors representing the gains for each of the feature's\nsplit.", + "name": "gains", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "List of rank 1 tensors representing the thesholds for each of the\nfeature's split.", + "name": "thresholds", + "numberAttr": "num_features", + "type": 3 + }, + { + "description": "List of rank 2 tensors with left leaf contribs for each of\nthe feature's splits. Will be added to the previous node values to constitute\nthe values of the left nodes.", + "name": "left_node_contribs", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "List of rank 2 tensors with right leaf contribs for each\nof the feature's splits. 
Will be added to the previous node values to constitute\nthe values of the right nodes.", + "name": "right_node_contribs", + "numberAttr": "num_features", + "type": 1 + }, + { + "description": "List of rank 1 tensors representing the split type for each feature.", + "name": "split_types", + "numberAttr": "num_features", + "type": 7 + }, + { + "description": "Max depth of the tree to build.", + "name": "max_depth", + "type": 3 + }, + { + "description": "shrinkage const for each new tree.", + "name": "learning_rate", + "type": 1 + }, + { + "description": "0-No pruning, 1-Pre-pruning, 2-Post-pruning.", + "name": "pruning_mode", + "type": 3 + } + ], + "summary": "Updates the tree ensemble by adding a layer to the last tree being grown" + } + }, + { + "name": "BroadcastArgs", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the\nbroadcasted shape. `s0`, `s1` and `r0` are all integer vectors.", + "inputs": [ + { + "name": "s0", + "typeAttr": "T" + }, + { + "name": "s1", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "r0", + "typeAttr": "T" + } + ], + "summary": "Return the shape of s0 op s1 with broadcast." 
+ } + }, + { + "name": "BroadcastGradientArgs", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "This is typically used by gradient computations for a broadcasting operation.", + "inputs": [ + { + "name": "s0", + "typeAttr": "T" + }, + { + "name": "s1", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "r0", + "typeAttr": "T" + }, + { + "name": "r1", + "typeAttr": "T" + } + ], + "summary": "Return the reduction indices for computing gradients of s0 op s1 with broadcast." + } + }, + { + "name": "BroadcastTo", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Broadcasting is the process of making arrays to have compatible shapes\nfor arithmetic operations. Two shapes are compatible if for each\ndimension pair they are either equal or one of them is one. When trying\nto broadcast a Tensor to a shape, it starts with the trailing dimensions,\nand works its way forward.\n\nFor example,\n\n>>> x = tf.constant([1, 2, 3])\n>>> y = tf.broadcast_to(x, [3, 3])\n>>> print(y)\ntf.Tensor(\n [[1 2 3]\n [1 2 3]\n [1 2 3]], shape=(3, 3), dtype=int32)\n\nIn the above example, the input Tensor with the shape of `[1, 3]`\nis broadcasted to output Tensor with shape of `[3, 3]`.\n\nWhen doing broadcasted operations such as multiplying a tensor\nby a scalar, broadcasting (usually) confers some time or space\nbenefit, as the broadcasted tensor is never materialized.\n\nHowever, `broadcast_to` does not carry with it any such benefits.\nThe newly-created tensor takes the full memory of the broadcasted\nshape. 
(In a graph context, `broadcast_to` might be fused to\nsubsequent operation and then be optimized away, however.)", + "inputs": [ + { + "description": "A Tensor to broadcast.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "An 1-D `int` Tensor. The shape of the desired output.", + "name": "shape", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "A Tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Broadcast an array for a compatible shape." + } + }, + { + "name": "Bucketize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "A sorted list of floats gives the boundary of the buckets.", + "name": "boundaries", + "type": "float32[]" + } + ], + "description": "For example, if the inputs are\n boundaries = [0, 10, 100]\n input = [[-5, 10000]\n [150, 10]\n [5, 100]]\n\nthen the output will be\n output = [[0, 3]\n [3, 2]\n [1, 3]]", + "inputs": [ + { + "description": "Any shape of Tensor contains with int or float type.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same shape with 'input', each value of input replaced with bucket index.\n\n@compatibility(numpy)\nEquivalent to np.digitize.\n@end_compatibility", + "name": "output", + "type": 3 + } + ], + "summary": "Bucketizes 'input' based on 'boundaries'." + } + }, + { + "name": "BytesProducedStatsDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Records the bytes size of each element of `input_dataset` in a StatsAggregator." 
+ } + }, + { + "name": "CSRSparseMatrixComponents", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "type", + "type": "type" + } + ], + "description": "This op is meant only for debugging / testing, and its interface is not expected\nto be stable.", + "inputs": [ + { + "description": "A batched CSRSparseMatrix.", + "name": "csr_sparse_matrix", + "type": 21 + }, + { + "description": "The index in `csr_sparse_matrix`'s batch.", + "name": "index", + "type": 3 + } + ], + "outputs": [ + { + "description": "An array containing CSR matrix row pointers.", + "name": "row_ptrs", + "type": 3 + }, + { + "description": "An array containing CSR matrix column indices.", + "name": "col_inds", + "type": 3 + }, + { + "description": "An array containing CSR matrix nonzero values.", + "name": "values", + "typeAttr": "type" + } + ], + "summary": "Reads out the CSR components at batch `index`." + } + }, + { + "name": "CSRSparseMatrixToDense", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "type", + "type": "type" + } + ], + "inputs": [ + { + "description": "A batched CSRSparseMatrix.", + "name": "sparse_input", + "type": 21 + } + ], + "outputs": [ + { + "description": "A dense tensor.", + "name": "dense_output", + "typeAttr": "type" + } + ], + "summary": "Convert a (possibly batched) CSRSparseMatrix to dense." 
+ } + }, + { + "name": "CSRSparseMatrixToSparseTensor", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "type", + "type": "type" + } + ], + "inputs": [ + { + "description": "A (possibly batched) CSRSparseMatrix.", + "name": "sparse_matrix", + "type": 21 + } + ], + "outputs": [ + { + "description": "SparseTensor indices.", + "name": "indices", + "type": 9 + }, + { + "description": "SparseTensor values.", + "name": "values", + "typeAttr": "type" + }, + { + "description": "SparseTensor dense shape.", + "name": "dense_shape", + "type": 9 + } + ], + "summary": "Converts a (possibly batched) CSRSparesMatrix to a SparseTensor." + } + }, + { + "name": "CSVDataset", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`, `string`.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + }, + { + "name": "compression_type", + "type": 7 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "header", + "type": 10 + }, + { + "name": "field_delim", + "type": 7 + }, + { + "name": "use_quote_delim", + "type": 10 + }, + { + "name": "na_value", + "type": 7 + }, + { + "name": "select_cols", + "type": 9 + }, + { + "name": "record_defaults", + "typeListAttr": "output_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "CTCBeamSearchDecoder", + "schema": { + "attributes": [ + { + "description": "A scalar >= 0 (beam search beam width).", + "minimum": 1, + "name": "beam_width", + "type": "int64" + }, + { + "description": "A scalar >= 0, <= beam_width (controls output size).", + "minimum": 1, + "name": "top_paths", + "type": "int64" + }, + { + "default": true, + "description": "If true, merge repeated classes in output.", + "name": 
"merge_repeated", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "A note about the attribute merge_repeated: For the beam search decoder,\nthis means that if consecutive entries in a beam are the same, only\nthe first of these is emitted. That is, when the top path is \"A B B B B\",\n\"A B\" is returned if merge_repeated = True but \"A B B B B\" is\nreturned if merge_repeated = False.", + "inputs": [ + { + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits.", + "name": "inputs", + "typeAttr": "T" + }, + { + "description": "A vector containing sequence lengths, size `(batch)`.", + "name": "sequence_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A list (length: top_paths) of indices matrices. Matrix j,\nsize `(total_decoded_outputs[j] x 2)`, has indices of a\n`SparseTensor`. The rows store: [batch, time].", + "name": "decoded_indices", + "numberAttr": "top_paths", + "type": 9 + }, + { + "description": "A list (length: top_paths) of values vectors. Vector j,\nsize `(length total_decoded_outputs[j])`, has the values of a\n`SparseTensor`. The vector stores the decoded classes for beam j.", + "name": "decoded_values", + "numberAttr": "top_paths", + "type": 9 + }, + { + "description": "A list (length: top_paths) of shape vector. Vector j,\nsize `(2)`, stores the shape of the decoded `SparseTensor[j]`.\nIts values are: `[batch_size, max_decoded_length[j]]`.", + "name": "decoded_shape", + "numberAttr": "top_paths", + "type": 9 + }, + { + "description": "A matrix, shaped: `(batch_size x top_paths)`. The\nsequence log-probabilities.", + "name": "log_probability", + "typeAttr": "T" + } + ], + "summary": "Performs beam search decoding on the logits given in input." 
+ } + }, + { + "name": "CTCGreedyDecoder", + "schema": { + "attributes": [ + { + "default": false, + "description": "If True, merge repeated classes in output.", + "name": "merge_repeated", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "A note about the attribute merge_repeated: if enabled, when\nconsecutive logits' maximum indices are the same, only the first of\nthese is emitted. Labeling the blank '*', the sequence \"A B B * B B\"\nbecomes \"A B B\" if merge_repeated = True and \"A B B B B\" if\nmerge_repeated = False.\n\nRegardless of the value of merge_repeated, if the maximum index of a given\ntime and batch corresponds to the blank, index `(num_classes - 1)`, no new\nelement is emitted.", + "inputs": [ + { + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits.", + "name": "inputs", + "typeAttr": "T" + }, + { + "description": "A vector containing sequence lengths, size `(batch_size)`.", + "name": "sequence_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "Indices matrix, size `(total_decoded_outputs x 2)`,\nof a `SparseTensor`. The rows store: [batch, time].", + "name": "decoded_indices", + "type": 9 + }, + { + "description": "Values vector, size: `(total_decoded_outputs)`,\nof a `SparseTensor`. The vector stores the decoded classes.", + "name": "decoded_values", + "type": 9 + }, + { + "description": "Shape vector, size `(2)`, of the decoded SparseTensor.\nValues are: `[batch_size, max_decoded_length]`.", + "name": "decoded_shape", + "type": 9 + }, + { + "description": "Matrix, size `(batch_size x 1)`, containing sequence\nlog-probabilities.", + "name": "log_probability", + "typeAttr": "T" + } + ], + "summary": "Performs greedy decoding on the logits given in inputs." 
+ } + }, + { + "name": "CTCLoss", + "schema": { + "attributes": [ + { + "default": false, + "description": "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation.", + "name": "preprocess_collapse_repeated", + "type": "boolean" + }, + { + "default": true, + "description": "Scalar. If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels. This is a simplified version of CTC.", + "name": "ctc_merge_repeated", + "type": "boolean" + }, + { + "default": false, + "description": "Scalar. If set to true, during CTC\ncalculation, items that have longer output sequences than input sequences\nare skipped: they don't contribute to the loss term and have zero-gradient.", + "name": "ignore_longer_outputs_than_inputs", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "the gradient. This class performs the softmax operation for you, so inputs\nshould be e.g. linear projections of outputs by an LSTM.", + "inputs": [ + { + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits.", + "name": "inputs", + "typeAttr": "T" + }, + { + "description": "The indices of a `SparseTensor`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`.", + "name": "labels_indices", + "type": 9 + }, + { + "description": "The values (labels) associated with the given batch and time.", + "name": "labels_values", + "type": 3 + }, + { + "description": "A vector containing sequence lengths (batch).", + "name": "sequence_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A vector (batch) containing log-probabilities.", + "name": "loss", + "typeAttr": "T" + }, + { + "description": "The gradient of `loss`. 
3-D, shape:\n`(max_time x batch_size x num_classes)`.", + "name": "gradient", + "typeAttr": "T" + } + ], + "summary": "Calculates the CTC Loss (log probability) for each batch entry. Also calculates" + } + }, + { + "name": "CTCLossV2", + "schema": { + "attributes": [ + { + "default": false, + "description": "Scalar, if true then repeated labels are\ncollapsed prior to the CTC calculation.", + "name": "preprocess_collapse_repeated", + "type": "boolean" + }, + { + "default": true, + "description": "Scalar. If set to false, *during* CTC calculation\nrepeated non-blank labels will not be merged and are interpreted as\nindividual labels. This is a simplified version of CTC.", + "name": "ctc_merge_repeated", + "type": "boolean" + }, + { + "default": false, + "description": "Scalar. If set to true, during CTC\ncalculation, items that have longer output sequences than input sequences\nare skipped: they don't contribute to the loss term and have zero-gradient.", + "name": "ignore_longer_outputs_than_inputs", + "type": "boolean" + } + ], + "description": "the gradient. This class performs the softmax operation for you, so inputs\nshould be e.g. linear projections of outputs by an LSTM.", + "inputs": [ + { + "description": "3-D, shape: `(max_time x batch_size x num_classes)`, the logits. 
Default blank\nlabel is 0 rather num_classes - 1.", + "name": "inputs", + "type": 1 + }, + { + "description": "The indices of a `SparseTensor`.\n`labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for\n`(batch b, time t)`.", + "name": "labels_indices", + "type": 9 + }, + { + "description": "The values (labels) associated with the given batch and time.", + "name": "labels_values", + "type": 3 + }, + { + "description": "A vector containing sequence lengths (batch).", + "name": "sequence_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A vector (batch) containing log-probabilities.", + "name": "loss", + "type": 1 + }, + { + "description": "The gradient of `loss`. 3-D, shape:\n`(max_time x batch_size x num_classes)`.", + "name": "gradient", + "type": 1 + } + ], + "summary": "Calculates the CTC Loss (log probability) for each batch entry. Also calculates" + } + }, + { + "name": "CacheDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "A CacheDataset will iterate over the input_dataset, and store tensors. If the\ncache already exists, the cache will be used. If the cache is inappropriate\n(e.g. cannot be opened, contains tensors of the wrong shape / size), an error\nwill the returned when used.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A path on the filesystem where we should cache the dataset. Note: this\nwill be a directory.", + "name": "filename", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that caches elements from `input_dataset`." 
+ } + }, + { + "name": "CacheDatasetV2", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "filename", + "type": 7 + }, + { + "name": "cache", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "Case", + "schema": { + "attributes": [ + { + "description": "A list of input types.", + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "description": "A list of output types.", + "minimum": 0, + "name": "Tout", + "type": "type[]" + }, + { + "description": " A list of functions each of which takes 'inputs' and returns a list of\n tensors, whose types are the same as what every other branch returns.", + "minimum": 1, + "name": "branches", + "type": "function[]" + }, + { + "default": [], + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": " An n-way switch statement, implementing the following:\n ```\n switch (branch_index) {\n case 0:\n output = branches[0](input);\n break;\n case 1:\n output = branches[1](input);\n break;\n ...\n case [[nbranches-1]]:\n default:\n output = branches[nbranches-1](input);\n break;\n }\n ```", + "inputs": [ + { + "description": "The branch selector, an int32 Tensor.", + "name": "branch_index", + "type": 3 + }, + { + "description": "A list of input tensors passed to the branch function.", + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "A list of return values.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "An n-way switch statement which calls a single branch function." 
+ } + }, + { + "name": "Cast", + "schema": { + "attributes": [ + { + "name": "SrcT", + "type": "type" + }, + { + "name": "DstT", + "type": "type" + }, + { + "default": false, + "name": "Truncate", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "SrcT" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "DstT" + } + ], + "summary": "Cast x of type SrcT to y of DstT." + } + }, + { + "name": "Ceil", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns element-wise smallest integer not less than x." + } + }, + { + "name": "CheckNumerics", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "Prefix of the error message.", + "name": "message", + "type": "string" + } + ], + "description": "When run, reports an `InvalidArgument` error if `tensor` has any values\nthat are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.", + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Checks a tensor for NaN and Inf values." + } + }, + { + "name": "CheckNumericsV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "Prefix of the error message.", + "name": "message", + "type": "string" + } + ], + "description": "When run, reports an `InvalidArgument` error if `tensor` has any values\nthat are not a number (NaN) or infinity (Inf). 
Otherwise, passes `tensor` as-is.\nUnlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf in the\nerrors it throws.", + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Checks a tensor for NaN, -Inf and +Inf values." + } + }, + { + "name": "Cholesky", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices.\n\nThe input has to be symmetric and positive definite. Only the lower-triangular\npart of the input will be used for this operation. The upper-triangular part\nwill not be read.\n\nThe output is a tensor of the same shape as the input\ncontaining the Cholesky decompositions for all input submatrices `[..., :, :]`.\n\n**Note**: The gradient computation on GPU is faster for large matrices but\nnot for large batch dimensions when the submatrices are small. In this\ncase it might be faster to use the CPU.", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the Cholesky decomposition of one or more square matrices." + } + }, + { + "name": "CholeskyGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "For an explanation see \"Differentiation of the Cholesky algorithm\" by\nIain Murray http://arxiv.org/abs/1602.07527.", + "inputs": [ + { + "description": "Output of batch Cholesky algorithm l = cholesky(A). 
Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor.", + "name": "l", + "typeAttr": "T" + }, + { + "description": "df/dl where f is some scalar function. Shape is `[..., M, M]`.\nAlgorithm depends only on lower triangular part of the innermost matrices of\nthis tensor.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Symmetrized version of df/dA . Shape is `[..., M, M]`", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the reverse mode backpropagated gradient of the Cholesky algorithm." + } + }, + { + "name": "ChooseFastestBranchDataset", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "num_elements_per_branch", + "type": "int64" + }, + { + "minimum": 1, + "name": "branches", + "type": "function[]" + }, + { + "minimum": 1, + "name": "other_arguments_lengths", + "type": "int64[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "ratio_numerator", + "type": 9 + }, + { + "name": "ratio_denominator", + "type": 9 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ChooseFastestDataset", + "schema": { + "attributes": [ + { + "minimum": 2, + "name": "N", + "type": "int64" + }, + { + "name": "num_experiments", + "type": "int64" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_datasets", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ClipByValue", + "schema": { + "attributes": [ 
+ { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "Given a tensor `t`, this operation returns a tensor of the same type and\nshape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.\nAny values less than `clip_value_min` are set to `clip_value_min`. Any values\ngreater than `clip_value_max` are set to `clip_value_max`.", + "inputs": [ + { + "description": "A `Tensor`.", + "name": "t", + "typeAttr": "T" + }, + { + "description": "A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape\nas `t`. The minimum value to clip by.", + "name": "clip_value_min", + "typeAttr": "T" + }, + { + "description": "A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape\nas `t`. The maximum value to clip by.", + "name": "clip_value_max", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A clipped `Tensor` with the same shape as input 't'.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Clips tensor values to a specified min and max." + } + }, + { + "name": "CloseSummaryWriter", + "schema": { + "inputs": [ + { + "name": "writer", + "type": 20 + } + ] + } + }, + { + "name": "CollectiveBcastRecv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bool`, `float32`, `float16`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "name": "shape", + "type": "shape" + }, + { + "default": "auto", + "name": "communication_hint", + "type": "string" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ], + "summary": "Receives a tensor value broadcast from another device." 
+ } + }, + { + "name": "CollectiveBcastSend", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bool`, `float32`, `float16`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "name": "shape", + "type": "shape" + }, + { + "default": "auto", + "name": "communication_hint", + "type": "string" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ], + "summary": "Broadcasts a tensor value to one or more other devices." + } + }, + { + "name": "CollectiveGather", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float16`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "name": "shape", + "type": "shape" + }, + { + "default": "auto", + "name": "communication_hint", + "type": "string" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ], + "summary": "Mutually accumulates multiple tensors of identical type and shape." + } + }, + { + "name": "CollectivePermute", + "schema": { + "attributes": [ + { + "description": "The type of elements to be exchanged. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "Each instance supplies its own input.\n\nFor example, suppose there are 4 TPU instances: `[A, B, C, D]`. 
Passing\nsource_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:\n`[D, A, B, C]`.", + "inputs": [ + { + "description": "The local input to be permuted. Currently only supports float and\nbfloat16.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A tensor with shape [num_pairs, 2].", + "name": "source_target_pairs", + "type": 3 + } + ], + "outputs": [ + { + "description": "The permuted input.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "An Op to permute tensors across replicated TPU instances." + } + }, + { + "name": "CollectiveReduce", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float16`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "name": "group_size", + "type": "int64" + }, + { + "name": "group_key", + "type": "int64" + }, + { + "name": "instance_key", + "type": "int64" + }, + { + "description": "Must be one of the following: `Min`, `Max`, `Mul`, `Add`.", + "name": "merge_op", + "type": "string" + }, + { + "description": "Must be one of the following: `Id`, `Div`.", + "name": "final_op", + "type": "string" + }, + { + "name": "subdiv_offsets", + "type": "int64[]" + }, + { + "default": [], + "name": "wait_for", + "type": "int64[]" + }, + { + "default": "auto", + "name": "communication_hint", + "type": "string" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ], + "summary": "Mutually reduces multiple tensors of identical type and shape." + } + }, + { + "name": "CombinedNonMaxSuppression", + "schema": { + "attributes": [ + { + "default": false, + "description": "If false, the output nmsed boxes, scores and classes\nare padded/clipped to `max_total_size`. If true, the\noutput nmsed boxes, scores and classes are padded to be of length\n`max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in\nwhich case it is clipped to `max_total_size`. 
Defaults to false.", + "name": "pad_per_class", + "type": "boolean" + }, + { + "default": true, + "description": "If true, assume the box coordinates are between [0, 1] and clip the output boxes\nif they fall beyond [0, 1]. If false, do not do clipping and output the box\ncoordinates as it is.", + "name": "clip_boxes", + "type": "boolean" + } + ], + "description": "This operation performs non_max_suppression on the inputs per batch, across\nall classes.\nPrunes away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system. Also note that\nthis algorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is the final boxes, scores and classes tensor\nreturned after performing non_max_suppression.", + "inputs": [ + { + "description": "A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. 
If `q` is 1 then\nsame boxes are used for all classes otherwise, if `q` is equal to number of\nclasses, class-specific boxes are used.", + "name": "boxes", + "type": 1 + }, + { + "description": "A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`\nrepresenting a single score corresponding to each box (each row of boxes).", + "name": "scores", + "type": 1 + }, + { + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression per class", + "name": "max_output_size_per_class", + "type": 3 + }, + { + "description": "A scalar representing maximum number of boxes retained over all classes.", + "name": "max_total_size", + "type": 3 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "name": "iou_threshold", + "type": 1 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "name": "score_threshold", + "type": 1 + } + ], + "outputs": [ + { + "description": "A [batch_size, max_detections, 4] float32 tensor\ncontaining the non-max suppressed boxes.", + "name": "nmsed_boxes", + "type": 1 + }, + { + "description": "A [batch_size, max_detections] float32 tensor\ncontaining the scores for the boxes.", + "name": "nmsed_scores", + "type": 1 + }, + { + "description": "A [batch_size, max_detections] float32 tensor\ncontaining the classes for the boxes.", + "name": "nmsed_classes", + "type": 1 + }, + { + "description": "A [batch_size] int32 tensor indicating the number of\nvalid detections per batch item. Only the top num_detections[i] entries in\nnms_boxes[i], nms_scores[i] and nms_class[i] are valid. 
The rest of the\nentries are zero paddings.", + "name": "valid_detections", + "type": 3 + } + ], + "summary": "Greedily selects a subset of bounding boxes in descending order of score," + } + }, + { + "name": "CompareAndBitpack", + "schema": { + "attributes": [ + { + "description": "The type of the input and threshold. Must be one of the following: `bool`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "Each comparison returns a boolean `true` (if `input_value > threshold`)\nor and `false` otherwise.\n\nThis operation is useful for Locality-Sensitive-Hashing (LSH) and other\nalgorithms that use hashing approximations of cosine and `L2` distances;\ncodes can be generated from an input via:\n\n```python\ncodebook_size = 50\ncodebook_bits = codebook_size * 32\ncodebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],\n dtype=x.dtype,\n initializer=tf.orthogonal_initializer())\ncodes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)\ncodes = tf.bitcast(codes, tf.int32) # go from uint8 to int32\n# now codes has shape x.shape[:-1] + [codebook_size]\n```\n\n**NOTE**: Currently, the innermost dimension of the tensor must be divisible\nby 8.\n\nGiven an `input` shaped `[s0, s1, ..., s_n]`, the output is\na `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.", + "inputs": [ + { + "description": "Values to compare against `threshold` and bitpack.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Threshold to compare against.", + "name": "threshold", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The bitpacked comparisons.", + "name": "output", + "type": 4 + } + ], + "summary": "Compare values of `input` to `threshold` and pack resulting bits into a `uint8`." 
+ } + }, + { + "name": "Complex", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tout", + "type": "type" + } + ], + "description": "Given a tensor `real` representing the real part of a complex number, and a\ntensor `imag` representing the imaginary part of a complex number, this\noperation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n*a* represents the `real` part and *b* represents the `imag` part.\n\nThe input tensors `real` and `imag` must have the same shape.\n\nFor example:\n\n```\n# tensor 'real' is [2.25, 3.25]\n# tensor `imag` is [4.75, 5.75]\ntf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]\n```", + "inputs": [ + { + "name": "real", + "typeAttr": "T" + }, + { + "name": "imag", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Tout" + } + ], + "summary": "Converts two real numbers to a complex number." + } + }, + { + "name": "ComplexAbs", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Tout", + "type": "type" + } + ], + "description": "Given a tensor `x` of complex numbers, this operation returns a tensor of type\n`float` or `double` that is the absolute value of each element in `x`. All\nelements in `x` must be complex numbers of the form \\\\(a + bj\\\\). 
The absolute\nvalue is computed as \\\\( \\sqrt{a^2 + b^2}\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "Tout" + } + ], + "summary": "Computes the complex absolute value of a tensor." + } + }, + { + "name": "CompressElement", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "input_types", + "type": "type[]" + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "input_types" + } + ], + "outputs": [ + { + "name": "compressed", + "type": 21 + } + ], + "summary": "Compresses a dataset element." + } + }, + { + "name": "ComputeAccidentalHits", + "schema": { + "attributes": [ + { + "description": "Number of true labels per context.", + "name": "num_true", + "type": "int64" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "When doing log-odds NCE, the result of this op should be passed through a\nSparseToDense op, then added to the logits of the sampled candidates. 
This has\nthe effect of 'removing' the sampled labels that match the true labels by\nmaking the classifier sure that they are sampled labels.", + "inputs": [ + { + "description": "The true_classes output of UnpackSparseLabels.", + "name": "true_classes", + "type": 9 + }, + { + "description": "The sampled_candidates output of CandidateSampler.", + "name": "sampled_candidates", + "type": 9 + } + ], + "outputs": [ + { + "description": "A vector of indices corresponding to rows of true_candidates.", + "name": "indices", + "type": 3 + }, + { + "description": "A vector of IDs of positions in sampled_candidates that match a true_label\nfor the row with the corresponding index in indices.", + "name": "ids", + "type": 9 + }, + { + "description": "A vector of the same length as indices and ids, in which each element\nis -FLOAT_MAX.", + "name": "weights", + "type": 1 + } + ], + "summary": "Computes the ids of the positions in sampled_candidates that match true_labels." + } + }, + { + "name": "Concat", + "schema": { + "attributes": [ + { + "minimum": 2, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "0-D. The dimension along which to concatenate. Must be in the\nrange [0, rank(values)).", + "name": "concat_dim", + "type": 3 + }, + { + "description": "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`.", + "name": "values", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor's shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Concatenates tensors along one dimension." 
+ } + }, + { + "name": "ConcatOffset", + "schema": { + "attributes": [ + { + "minimum": 2, + "name": "N", + "type": "int64" + } + ], + "description": "For example:\n\n```\n# 'x' is [2, 2, 7]\n# 'y' is [2, 3, 7]\n# 'z' is [2, 5, 7]\nconcat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]\n```\n\nThis is typically used by gradient computations for a concat operation.", + "inputs": [ + { + "description": "The dimension along which to concatenate.", + "name": "concat_dim", + "type": 3 + }, + { + "description": "The `N` int32 vectors representing shape of tensors being concatenated.", + "name": "shape", + "numberAttr": "N", + "type": 3 + } + ], + "outputs": [ + { + "description": "The `N` int32 vectors representing the starting offset\nof input tensors within the concatenated output.", + "name": "offset", + "numberAttr": "N", + "type": 3 + } + ], + "summary": "Computes offsets of concat inputs within its output." + } + }, + { + "name": "ConcatV2", + "schema": { + "attributes": [ + { + "minimum": 2, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "category": "Tensor", + "inputs": [ + { + "description": "List of `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`.", + "name": "values", + "numberAttr": "N", + "typeAttr": "T" + }, + { + "description": "0-D. The dimension along which to concatenate. Must be in the\nrange [-rank(values), rank(values)).", + "name": "axis", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. 
This tensor's shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Concatenates tensors along one dimension." + } + }, + { + "name": "ConcatenateDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "another_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that concatenates `input_dataset` with `another_dataset`." + } + }, + { + "name": "ConditionalAccumulator", + "schema": { + "attributes": [ + { + "description": "The type of the value being accumulated. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "The shape of the values, can be [], in which case shape is unknown.", + "name": "shape", + "type": "shape" + }, + { + "default": "", + "description": "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this accumulator will be shared under the\ngiven name across multiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": "MEAN", + "description": "Must be one of the following: `MEAN`, `SUM`.", + "name": "reduction_type", + "type": "string" + } + ], + "description": "The accumulator accepts gradients marked with local_step greater or\nequal to the most recent global_step known to the accumulator. 
The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator.", + "outputs": [ + { + "description": "The handle to the accumulator.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "A conditional accumulator for aggregating gradients." + } + }, + { + "name": "ConfigureDistributedTPU", + "schema": { + "attributes": [ + { + "default": "", + "description": "Reserved. Do not use.", + "name": "embedding_config", + "type": "string" + }, + { + "default": "", + "description": "Serialized tensorflow.tpu.TPUEmbeddingConfiguration that\ndescribes the embedding lookups of the program.", + "name": "tpu_embedding_config", + "type": "string" + }, + { + "default": false, + "description": "Reserved. Do not use.", + "name": "is_global_init", + "type": "boolean" + }, + { + "default": false, + "name": "enable_whole_mesh_compilations", + "type": "boolean" + }, + { + "default": true, + "name": "compilation_failure_closes_chips", + "type": "boolean" + } + ], + "outputs": [ + { + "description": "A serialized tensorflow.tpu.TopologyProto that describes the TPU\ntopology.", + "name": "topology", + "type": 7 + } + ], + "summary": "Sets up the centralized structures for a distributed TPU system." + } + }, + { + "name": "ConfigureTPUEmbedding", + "schema": { + "attributes": [ + { + "description": "Serialized tensorflow.tpu.TPUEmbeddingConfiguration that\ndescribes the embedding lookups of the program.", + "name": "config", + "type": "string" + } + ], + "summary": "Sets up TPUEmbedding in a distributed TPU system." 
+ } + }, + { + "name": "Conj", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`, `variant`.", + "name": "T", + "type": "type" + } + ], + "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ncomplex numbers that are the complex conjugate of each element in `input`. The\ncomplex numbers in `input` must be of the form \\\\(a + bj\\\\), where *a* is the\nreal part and *b* is the imaginary part.\n\nThe complex conjugate returned by this operation is of the form \\\\(a - bj\\\\).\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns the complex conjugate of a complex number." + } + }, + { + "name": "ConjugateTranspose", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tperm", + "type": "type" + } + ], + "description": "The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:\n `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`\n `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "perm", + "typeAttr": "Tperm" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Shuffle dimensions of x according to a permutation and conjugate the result." 
+ } + }, + { + "name": "Const", + "schema": { + "attributes": [ + { + "description": "Attr `value` is the tensor to return.", + "name": "value", + "type": "tensor" + }, + { + "name": "dtype", + "type": "type" + } + ], + "category": "Constant", + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Returns a constant tensor." + } + }, + { + "name": "ConsumeMutexLock", + "schema": { + "description": "This op exists to consume a tensor created by `MutexLock` (other than\ndirect control dependencies). It should be the only that consumes the tensor,\nand will raise an error if it is not. Its only purpose is to keep the\nmutex lock tensor alive until it is consumed by this op.\n\n**NOTE**: This operation must run on the same device as its input. This may\nbe enforced via the `colocate_with` mechanism.", + "inputs": [ + { + "description": "A tensor returned by `MutexLock`.", + "name": "mutex_lock", + "type": 21 + } + ], + "summary": "This op consumes a lock created by `MutexLock`." + } + }, + { + "name": "ControlTrigger", + "schema": { + "description": "Only useful as a placeholder for control edges.", + "summary": "Does nothing. Serves as a control trigger for scheduling." + } + }, + { + "name": "Conv2D", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D tensor of length 4. The stride of the sliding window for each\ndimension of `input`. The dimension order is determined by the value of\n`data_format`, see below for details.", + "name": "strides", + "type": "int64[]" + }, + { + "default": true, + "name": "use_cudnn_on_gpu", + "type": "boolean" + }, + { + "description": "The type of padding algorithm to use. 
Must be one of the following: `SAME`, `VALID`, `EXPLICIT`.", + "name": "padding", + "type": "string" + }, + { + "default": [], + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "name": "explicit_paddings", + "type": "int64[]" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "category": "Layer", + "description": "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`, this op\nperforms the following:\n\n1. Flattens the filter to a 2-D matrix with shape\n `[filter_height * filter_width * in_channels, output_channels]`.\n2. Extracts image patches from the input tensor to form a *virtual*\n tensor of shape `[batch, out_height, out_width,\n filter_height * filter_width * in_channels]`.\n3. 
For each patch, right-multiplies the filter matrix and the image patch\n vector.\n\nIn detail, with the default NHWC format,\n\n output[b, i, j, k] =\n sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *\n filter[di, dj, q, k]\n\nMust have `strides[0] = strides[3] = 1`. For the most common case of the same\nhorizontal and vertices strides, `strides = [1, stride, stride, 1]`.", + "inputs": [ + { + "description": "A 4-D tensor. The dimension order is interpreted according to the value\nof `data_format`, see below for details.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A 4-D tensor of shape\n`[filter_height, filter_width, in_channels, out_channels]`", + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A 4-D tensor. The dimension order is determined by the value of\n`data_format`, see below for details.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes a 2-D convolution given 4-D `input` and `filter` tensors." + } + }, + { + "name": "Conv2DBackpropFilter", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat.", + "name": "strides", + "type": "int64[]" + }, + { + "default": true, + "name": "use_cudnn_on_gpu", + "type": "boolean" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`.", + "name": "padding", + "type": "string" + }, + { + "default": [], + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. 
If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "name": "explicit_paddings", + "type": "int64[]" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, out_channels]` tensor.", + "name": "filter_sizes", + "type": 3 + }, + { + "description": "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.\nthe `filter` input of the convolution.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of convolution with respect to the filter." 
+ } + }, + { + "name": "Conv2DBackpropInput", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`.", + "name": "T", + "type": "type" + }, + { + "description": "The stride of the sliding window for each dimension of the input\nof the convolution. Must be in the same order as the dimension specified with\nformat.", + "name": "strides", + "type": "int64[]" + }, + { + "default": true, + "name": "use_cudnn_on_gpu", + "type": "boolean" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`.", + "name": "padding", + "type": "string" + }, + { + "default": [], + "description": "If `padding` is `\"EXPLICIT\"`, the list of explicit padding amounts. For the ith\ndimension, the amount of padding inserted before and after the dimension is\n`explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If\n`padding` is not `\"EXPLICIT\"`, `explicit_paddings` must be empty.", + "name": "explicit_paddings", + "type": "int64[]" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. 
Dilations in the batch and depth\ndimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "An integer vector representing the shape of `input`,\nwhere `input` is a 4-D `[batch, height, width, channels]` tensor.", + "name": "input_sizes", + "type": 3 + }, + { + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.", + "name": "filter", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient\nw.r.t. the input of the convolution.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of convolution with respect to the input." + } + }, + { + "name": "Conv3D", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. 
Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 5. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "description": "In signal processing, cross-correlation is a measure of similarity of\ntwo waveforms as a function of a time-lag applied to one of them. This\nis also known as a sliding dot product or sliding inner-product.\n\nOur Conv3D implements a form of cross-correlation.", + "inputs": [ + { + "description": "Shape `[batch, in_depth, in_height, in_width, in_channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Shape `[filter_depth, filter_height, filter_width, in_channels,\nout_channels]`. `in_channels` must match between `input` and `filter`.", + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes a 3-D convolution given 5-D `input` and `filter` tensors." + } + }, + { + "name": "Conv3DBackpropFilter", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. 
Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "Shape `[batch, depth, rows, cols, in_channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`.", + "name": "filter", + "typeAttr": "T" + }, + { + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of 3-D convolution with respect to the filter." + } + }, + { + "name": "Conv3DBackpropFilterV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 5. The dilation factor for each dimension of\n`input`. 
If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "Shape `[batch, depth, rows, cols, in_channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 5-D\n`[filter_depth, filter_height, filter_width, in_channels, out_channels]`\ntensor.", + "name": "filter_sizes", + "type": 3 + }, + { + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of 3-D convolution with respect to the filter." + } + }, + { + "name": "Conv3DBackpropInput", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. 
Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "Shape `[batch, depth, rows, cols, in_channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`.", + "name": "filter", + "typeAttr": "T" + }, + { + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of 3-D convolution with respect to the input." + } + }, + { + "name": "Conv3DBackpropInputV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 5. The dilation factor for each dimension of\n`input`. 
If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "name": "dilations", + "type": "int64[]" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tshape", + "type": "type" + } + ], + "inputs": [ + { + "description": "An integer vector representing the tensor shape of `input`,\nwhere `input` is a 5-D\n`[batch, depth, rows, cols, in_channels]` tensor.", + "name": "input_sizes", + "typeAttr": "Tshape" + }, + { + "description": "Shape `[depth, rows, cols, in_channels, out_channels]`.\n`in_channels` must match between `input` and `filter`.", + "name": "filter", + "typeAttr": "T" + }, + { + "description": "Backprop signal of shape `[batch, out_depth, out_rows, out_cols,\nout_channels]`.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of 3-D convolution with respect to the input." + } + }, + { + "name": "Copy", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "The name of the input tensor.", + "name": "tensor_name", + "type": "string" + }, + { + "default": [], + "description": "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. 
E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\".", + "name": "debug_ops_spec", + "type": "string[]" + } + ], + "description": "Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the\ndevice on which the tensor is allocated.\nN.B.: If the all downstream attached debug ops are disabled given the current\ngRPC gating status, the output will simply forward the input tensor without\ndeep-copying. See the documentation of Debug* ops for more details.\n\nUnlike the CopyHost Op, this op does not have HostMemory constraint on its\ninput or output.", + "inputs": [ + { + "description": "Input tensor.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Copy a tensor from CPU-to-CPU or GPU-to-GPU." + } + }, + { + "name": "CopyHost", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "The name of the input tensor.", + "name": "tensor_name", + "type": "string" + }, + { + "default": [], + "description": "A list of debug op spec (op, url, gated_grpc) for attached debug\nops. Each element of the list has the format\n;;, wherein gated_grpc is boolean represented\nas 0/1. E.g., \"DebugIdentity;grpc://foo:3333;1\",\n\"DebugIdentity;file:///tmp/tfdbg_1;0\".", + "name": "debug_ops_spec", + "type": "string[]" + } + ], + "description": "Performs CPU-to-CPU deep-copying of tensor.\nN.B.: If the all downstream attached debug ops are disabled given the current\ngRPC gating status, the output will simply forward the input tensor without\ndeep-copying. See the documentation of Debug* ops for more details.\n\nUnlike the Copy Op, this op has HostMemory constraint on its input or output.", + "inputs": [ + { + "description": "Input tensor.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Copy a tensor to host." 
+ } + }, + { + "name": "Cos", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes cosine of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `[-1,1]`. If input lies outside the boundary, `nan`\n is returned.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes cos of x element-wise." + } + }, + { + "name": "Cosh", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes hyperbolic cosine of every\n element in the tensor. Input range is `[-inf, inf]` and output range\n is `[1, inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 2, 10, float(\"inf\")])\n tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes hyperbolic cosine of x element-wise." 
+ } + }, + { + "name": "CountUpTo", + "schema": { + "attributes": [ + { + "description": "If incrementing ref would bring it above limit, instead generates an\n'OutOfRange' error.", + "name": "limit", + "type": "int64" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "Should be from a scalar `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Increments 'ref' until it reaches 'limit'." + } + }, + { + "name": "CreateSummaryDbWriter", + "schema": { + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "db_uri", + "type": 7 + }, + { + "name": "experiment_name", + "type": 7 + }, + { + "name": "run_name", + "type": 7 + }, + { + "name": "user_name", + "type": 7 + } + ] + } + }, + { + "name": "CreateSummaryFileWriter", + "schema": { + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "logdir", + "type": 7 + }, + { + "name": "max_queue", + "type": 3 + }, + { + "name": "flush_millis", + "type": 3 + }, + { + "name": "filename_suffix", + "type": 7 + } + ] + } + }, + { + "name": "CropAndResize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `uint16`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "bilinear", + "description": "A string specifying the sampling method for resizing. It can be either\n`\"bilinear\"` or `\"nearest\"` and default to `\"bilinear\"`. Currently two sampling\nmethods are supported: Bilinear and Nearest Neighbor. 
Must be one of the following: `bilinear`, `nearest`.", + "name": "method", + "type": "string" + }, + { + "default": 0.0, + "description": "Value used for extrapolation, when applicable.", + "name": "extrapolation_value", + "type": "float32" + } + ], + "description": "Extracts crops from the input image tensor and resizes them using bilinear\nsampling or nearest neighbor sampling (possibly with aspect ratio change) to a\ncommon output size specified by `crop_size`. This is more general than the\n`crop_to_bounding_box` op which extracts a fixed size slice from the input image\nand does not allow resizing or aspect ratio change.\n\nReturns a tensor with `crops` from the input `image` at positions defined at the\nbounding box locations in `boxes`. The cropped boxes are all resized (with\nbilinear or nearest neighbor interpolation) to a fixed\n`size = [crop_height, crop_width]`. The result is a 4-D tensor\n`[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.\nIn particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical\nresults to using `tf.image.resize_bilinear()` or\n`tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with\n`align_corners=True`.", + "inputs": [ + { + "description": "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive.", + "name": "image", + "typeAttr": "T" + }, + { + "description": "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. 
The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values.", + "name": "boxes", + "type": 1 + }, + { + "description": "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to.", + "name": "box_ind", + "type": 3 + }, + { + "description": "A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All\ncropped image patches are resized to this size. The aspect ratio of the image\ncontent is not preserved. Both `crop_height` and `crop_width` need to be\npositive.", + "name": "crop_size", + "type": 3 + } + ], + "outputs": [ + { + "description": "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.", + "name": "crops", + "type": 1 + } + ], + "summary": "Extracts crops from the input image tensor and resizes them." + } + }, + { + "name": "CropAndResizeGradBoxes", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `uint16`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "bilinear", + "description": "A string specifying the interpolation method. Only 'bilinear' is\nsupported for now. Must be one of the following: `bilinear`.", + "name": "method", + "type": "string" + } + ], + "inputs": [ + { + "description": "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.", + "name": "grads", + "type": 1 + }, + { + "description": "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\nBoth `image_height` and `image_width` need to be positive.", + "name": "image", + "typeAttr": "T" + }, + { + "description": "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. 
A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values.", + "name": "boxes", + "type": 1 + }, + { + "description": "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to.", + "name": "box_ind", + "type": 3 + } + ], + "outputs": [ + { + "description": "A 2-D tensor of shape `[num_boxes, 4]`.", + "name": "output", + "type": 1 + } + ], + "summary": "Computes the gradient of the crop_and_resize op wrt the input boxes tensor." + } + }, + { + "name": "CropAndResizeGradImage", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float16`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "bilinear", + "description": "A string specifying the interpolation method. Only 'bilinear' is\nsupported for now. Must be one of the following: `bilinear`, `nearest`.", + "name": "method", + "type": "string" + } + ], + "inputs": [ + { + "description": "A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.", + "name": "grads", + "type": 1 + }, + { + "description": "A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\nspecifies the coordinates of a box in the `box_ind[i]` image and is specified\nin normalized coordinates `[y1, x1, y2, x2]`. 
A normalized coordinate value of\n`y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the\n`[0, 1]` interval of normalized image height is mapped to\n`[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in\nwhich case the sampled crop is an up-down flipped version of the original\nimage. The width dimension is treated similarly. Normalized coordinates\noutside the `[0, 1]` range are allowed, in which case we use\n`extrapolation_value` to extrapolate the input image values.", + "name": "boxes", + "type": 1 + }, + { + "description": "A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\nThe value of `box_ind[i]` specifies the image that the `i`-th box refers to.", + "name": "box_ind", + "type": 3 + }, + { + "description": "A 1-D tensor with value `[batch, image_height, image_width, depth]`\ncontaining the original image size. Both `image_height` and `image_width` need\nto be positive.", + "name": "image_size", + "type": 3 + } + ], + "outputs": [ + { + "description": "A 4-D tensor of shape `[batch, image_height, image_width, depth]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of the crop_and_resize op wrt the input image tensor." + } + }, + { + "name": "Cross", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "`a` and `b` must be the same shape; they can either be simple 3-element vectors,\nor any shape where the innermost dimension is 3. 
In the latter case, each pair\nof corresponding 3-element vectors is cross-multiplied independently.", + "inputs": [ + { + "description": "A tensor containing 3-element vectors.", + "name": "a", + "typeAttr": "T" + }, + { + "description": "Another tensor, of same type and shape as `a`.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Pairwise cross product of the vectors in `a` and `b`.", + "name": "product", + "typeAttr": "T" + } + ], + "summary": "Compute the pairwise cross product." + } + }, + { + "name": "CrossReplicaSum", + "schema": { + "attributes": [ + { + "description": "The type of elements to be summed. Must be one of the following: `bfloat16`, `float32`, `int32`, `uint32`.", + "name": "T", + "type": "type" + } + ], + "description": "Each instance supplies its own input.\n\nFor example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.\nPassing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,\nand `B, D, F, H` as group 1. Thus we get the outputs:\n`[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.", + "inputs": [ + { + "description": "The local input to the sum.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "An int32 tensor with shape\n[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the\nreplica ids in the ith subgroup.", + "name": "group_assignment", + "type": 3 + } + ], + "outputs": [ + { + "description": "The sum of all the distributed inputs.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "An Op to sum inputs across replicated TPU instances." 
+ } + }, + { + "name": "CudnnRNN", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": true, + "name": "is_training", + "type": "boolean" + } + ], + "description": "Computes the RNN from the input and initial states, with respect to the params\nbuffer.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. 
For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\nis_training: Indicates whether this operation is used for inference or\n training.\nreserve_space: An opaque tensor that can be used in backprop calculation. It\n is only produced if is_training is false.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + } + ], + "summary": "A RNN backed by cuDNN." 
+ } + }, + { + "name": "CudnnRNNBackprop", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + } + ], + "description": "Compute the backprop of both data and weights in a RNN.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. 
For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\noutput_backprop: A 3-D tensor with the same shape as output in the forward pass.\noutput_h_backprop: A 3-D tensor with the same shape as output_h in the forward\n pass.\noutput_c_backprop: A 3-D tensor with the same shape as output_c in the forward\n pass.\nreserve_space: The same reserve_space produced in for forward operation.\ninput_backprop: The backprop to input in the forward pass. Has the same shape\n as input.\ninput_h_backprop: The backprop to input_h in the forward pass. Has the same\n shape as input_h.\ninput_c_backprop: The backprop to input_c in the forward pass. Has the same\n shape as input_c.\nparams_backprop: The backprop to the params buffer in the forward pass. 
Has the\n same shape as params.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "output_backprop", + "typeAttr": "T" + }, + { + "name": "output_h_backprop", + "typeAttr": "T" + }, + { + "name": "output_c_backprop", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "input_backprop", + "typeAttr": "T" + }, + { + "name": "input_h_backprop", + "typeAttr": "T" + }, + { + "name": "input_c_backprop", + "typeAttr": "T" + }, + { + "name": "params_backprop", + "typeAttr": "T" + } + ], + "summary": "Backprop step of CudnnRNN." + } + }, + { + "name": "CudnnRNNBackpropV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + } + ], + "description": "Compute the backprop of both data and weights in a RNN. 
Takes an extra\n \"host_reserved\" inupt than CudnnRNNBackprop, which is used to determine RNN\n cudnnRNNAlgo_t and cudnnMathType_t.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. 
An empty tensor for other models.\noutput_backprop: A 3-D tensor with the same shape as output in the forward pass.\noutput_h_backprop: A 3-D tensor with the same shape as output_h in the forward\n pass.\noutput_c_backprop: A 3-D tensor with the same shape as output_c in the forward\n pass.\nreserve_space: The same reserve_space produced in the forward operation.\nhost_reserved: The same host_reserved produced in the forward operation.\ninput_backprop: The backprop to input in the forward pass. Has the same shape\n as input.\ninput_h_backprop: The backprop to input_h in the forward pass. Has the same\n shape as input_h.\ninput_c_backprop: The backprop to input_c in the forward pass. Has the same\n shape as input_c.\nparams_backprop: The backprop to the params buffer in the forward pass. Has the\n same shape as params.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "output_backprop", + "typeAttr": "T" + }, + { + "name": "output_h_backprop", + "typeAttr": "T" + }, + { + "name": "output_c_backprop", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ], + "outputs": [ + { + "name": "input_backprop", + "typeAttr": "T" + }, + { + "name": "input_h_backprop", + "typeAttr": "T" + }, + { + "name": "input_c_backprop", + "typeAttr": "T" + }, + { + "name": "params_backprop", + "typeAttr": "T" + } + ], + "summary": "Backprop step of CudnnRNN." 
+ } + }, + { + "name": "CudnnRNNBackpropV3", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": 0, + "name": "num_proj", + "type": "int64" + }, + { + "default": true, + "name": "time_major", + "type": "boolean" + } + ], + "description": "Compute the backprop of both data and weights in a RNN. Takes an extra\n \"sequence_lengths\" input than CudnnRNNBackprop.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, input_size]. 
If time_major is false, the shape is\n [batch_size, seq_length, input_size].\ninput_h: If time_major is true, this is a 3-D tensor with the shape of\n [num_layer * dir, batch_size, num_units]. If time_major is false, the shape\n is [batch_size, num_layer * dir, num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\nsequence_lengths: a vector of lengths of each input sequence.\noutput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, dir * num_units]. If time_major is false, the\n shape is [batch_size, seq_length, dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\noutput_backprop: A 3-D tensor with the same shape as output in the forward pass.\noutput_h_backprop: A 3-D tensor with the same shape as output_h in the forward\n pass.\noutput_c_backprop: A 3-D tensor with the same shape as output_c in the forward\n pass.\ntime_major: Indicates whether the input/output format is time major or batch\n major.\nreserve_space: The same reserve_space produced in the forward operation.\ninput_backprop: The backprop to input in the forward pass. Has the same shape\n as input.\ninput_h_backprop: The backprop to input_h in the forward pass. Has the same\n shape as input_h.\ninput_c_backprop: The backprop to input_c in the forward pass. Has the same\n shape as input_c.\nparams_backprop: The backprop to the params buffer in the forward pass. 
Has the\n same shape as params.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "sequence_lengths", + "type": 3 + }, + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "output_backprop", + "typeAttr": "T" + }, + { + "name": "output_h_backprop", + "typeAttr": "T" + }, + { + "name": "output_c_backprop", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ], + "outputs": [ + { + "name": "input_backprop", + "typeAttr": "T" + }, + { + "name": "input_h_backprop", + "typeAttr": "T" + }, + { + "name": "input_c_backprop", + "typeAttr": "T" + }, + { + "name": "params_backprop", + "typeAttr": "T" + } + ], + "summary": "Backprop step of CudnnRNNV3." + } + }, + { + "name": "CudnnRNNCanonicalToParams", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "num_params", + "type": "int64" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + } + ], + "description": "Writes a 
set of weights into the opaque params buffer so they can be used in\nupcoming training or inferences.\n\nNote that the params buffer may not be compatible across different GPUs. So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nnum_params: number of parameter sets for all layers.\n Each layer may contain multiple parameter sets, with each set consisting of\n a weight matrix and a bias vector.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n The actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. 
When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.", + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "weights", + "numberAttr": "num_params", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "params", + "typeAttr": "T" + } + ], + "summary": "Converts CudnnRNN params from canonical form to usable form." + } + }, + { + "name": "CudnnRNNCanonicalToParamsV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "num_params_weights", + "type": "int64" + }, + { + "minimum": 1, + "name": "num_params_biases", + "type": "int64" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": 0, + "name": "num_proj", + "type": "int64" + } + ], + "description": "Writes a set of weights into the opaque params buffer so they can be used in\nupcoming training or inferences.\n\nNote that the params buffer may not be compatible across different GPUs. 
So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nnum_params_weights: number of weight parameter matrix for all layers.\nnum_params_biases: number of bias parameter vector for all layers.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n The actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.\nnum_proj: The output dimensionality for the projection matrices. If None or 0,\n no projection is performed.", + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "weights", + "numberAttr": "num_params_weights", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params_biases", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "params", + "typeAttr": "T" + } + ], + "summary": "Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM." 
+ } + }, + { + "name": "CudnnRNNParamsSize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "S", + "type": "type" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": 0, + "name": "num_proj", + "type": "int64" + } + ], + "description": "Return the params size that can be used by the Cudnn RNN model. Subsequent\nweight allocation and initialization should use this size.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n The actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. 
When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.\nparams_size: The size of the params buffer that should be allocated and\n initialized for this RNN model. Note that this params buffer may not be\n compatible across GPUs. Please use CudnnRNNParamsWeights and\n CudnnRNNParamsBiases to save and restore them in a way that is compatible\n across different runs.", + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + } + ], + "outputs": [ + { + "name": "params_size", + "typeAttr": "S" + } + ], + "summary": "Computes size of weights that can be used by a Cudnn RNN model." + } + }, + { + "name": "CudnnRNNParamsToCanonical", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "num_params", + "type": "int64" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + } + ], + "description": "Retrieves a set of weights from the opaque params buffer that can be saved and\nrestored in a way compatible with future runs.\n\nNote that the params buffer may not be compatible across different GPUs. 
So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nnum_params: number of parameter sets for all layers.\n Each layer may contain multiple parameter sets, with each set consisting of\n a weight matrix and a bias vector.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n The actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.", + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "weights", + "numberAttr": "num_params", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params", + "typeAttr": "T" + } + ], + "summary": "Retrieves CudnnRNN params in canonical form." 
+ } + }, + { + "name": "CudnnRNNParamsToCanonicalV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "num_params_weights", + "type": "int64" + }, + { + "minimum": 1, + "name": "num_params_biases", + "type": "int64" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": 0, + "name": "num_proj", + "type": "int64" + } + ], + "description": "Retrieves a set of weights from the opaque params buffer that can be saved and\nrestored in a way compatible with future runs.\n\nNote that the params buffer may not be compatible across different GPUs. So any\nsave and restoration should be converted to and from the canonical weights and\nbiases.\n\nnum_layers: Specifies the number of layers in the RNN model.\nnum_units: Specifies the size of the hidden state.\ninput_size: Specifies the size of the input state.\nnum_params_weights: number of weight parameter matrix for all layers.\nnum_params_biases: number of bias parameter vector for all layers.\nweights: the canonical form of weights that can be used for saving\n and restoration. They are more likely to be compatible across different\n generations.\nbiases: the canonical form of biases that can be used for saving\n and restoration. 
They are more likely to be compatible across different\n generations.\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicate whether there is a linear projection between the input and\n The actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used.\n dir = (direction == bidirectional) ? 2 : 1\ndropout: dropout probability. When set to 0., dropout is disabled.\nseed: the 1st part of a seed to initialize dropout.\nseed2: the 2nd part of a seed to initialize dropout.\nnum_proj: The output dimensionality for the projection matrices. If None or 0,\n no projection is performed.", + "inputs": [ + { + "name": "num_layers", + "type": 3 + }, + { + "name": "num_units", + "type": 3 + }, + { + "name": "input_size", + "type": 3 + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "weights", + "numberAttr": "num_params_weights", + "typeAttr": "T" + }, + { + "name": "biases", + "numberAttr": "num_params_biases", + "typeAttr": "T" + } + ], + "summary": "Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM." 
+ } + }, + { + "name": "CudnnRNNV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": true, + "name": "is_training", + "type": "boolean" + } + ], + "description": "Computes the RNN from the input and initial states, with respect to the params\nbuffer. Produces one extra output \"host_reserved\" than CudnnRNN.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. 
When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: A 3-D tensor with the shape of [seq_length, batch_size, input_size].\ninput_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,\n num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\noutput: A 3-D tensor with the shape of [seq_length, batch_size,\n dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\nis_training: Indicates whether this operation is used for inference or\n training.\nreserve_space: An opaque tensor that can be used in backprop calculation. It\n is only produced if is_training is true.\nhost_reserved: An opaque tensor that can be used in backprop calculation. It is\n only produced if is_training is true. It is output on host memory rather than\n device memory.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ], + "summary": "A RNN backed by cuDNN." 
+ } + }, + { + "name": "CudnnRNNV3", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "lstm", + "description": "Must be one of the following: `rnn_relu`, `rnn_tanh`, `lstm`, `gru`.", + "name": "rnn_mode", + "type": "string" + }, + { + "default": "linear_input", + "description": "Must be one of the following: `linear_input`, `skip_input`, `auto_select`.", + "name": "input_mode", + "type": "string" + }, + { + "default": "unidirectional", + "description": "Must be one of the following: `unidirectional`, `bidirectional`.", + "name": "direction", + "type": "string" + }, + { + "default": 0.0, + "name": "dropout", + "type": "float32" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": 0, + "name": "num_proj", + "type": "int64" + }, + { + "default": true, + "name": "is_training", + "type": "boolean" + }, + { + "default": true, + "name": "time_major", + "type": "boolean" + } + ], + "description": "Computes the RNN from the input and initial states, with respect to the params\nbuffer. Accepts one extra input \"sequence_lengths\" than CudnnRNN.\n\nrnn_mode: Indicates the type of the RNN model.\ninput_mode: Indicates whether there is a linear projection between the input and\n the actual computation before the first layer. 'skip_input' is only allowed\n when input_size == num_units; 'auto_select' implies 'skip_input' when\n input_size == num_units; otherwise, it implies 'linear_input'.\ndirection: Indicates whether a bidirectional model will be used. Should be\n \"unidirectional\" or \"bidirectional\".\ndropout: Dropout probability. 
When set to 0., dropout is disabled.\nseed: The 1st part of a seed to initialize dropout.\nseed2: The 2nd part of a seed to initialize dropout.\ninput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, input_size]. If time_major is false, the shape is\n [batch_size, seq_length, input_size].\ninput_h: If time_major is true, this is a 3-D tensor with the shape of\n [num_layer * dir, batch_size, num_units]. If time_major is false, the shape\n is [batch_size, num_layer * dir, num_units].\ninput_c: For LSTM, a 3-D tensor with the shape of\n [num_layer * dir, batch, num_units]. For other models, it is ignored.\nparams: A 1-D tensor that contains the weights and biases in an opaque layout.\n The size must be created through CudnnRNNParamsSize, and initialized\n separately. Note that they might not be compatible across different\n generations. So it is a good idea to save and restore\nsequence_lengths: a vector of lengths of each input sequence.\noutput: If time_major is true, this is a 3-D tensor with the shape of\n [seq_length, batch_size, dir * num_units]. If time_major is false, the\n shape is [batch_size, seq_length, dir * num_units].\noutput_h: The same shape has input_h.\noutput_c: The same shape as input_c for LSTM. An empty tensor for other models.\nis_training: Indicates whether this operation is used for inference or\n training.\ntime_major: Indicates whether the input/output format is time major or batch\n major.\nreserve_space: An opaque tensor that can be used in backprop calculation. 
It\n is only produced if is_training is true.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_h", + "typeAttr": "T" + }, + { + "name": "input_c", + "typeAttr": "T" + }, + { + "name": "params", + "typeAttr": "T" + }, + { + "name": "sequence_lengths", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "name": "output_h", + "typeAttr": "T" + }, + { + "name": "output_c", + "typeAttr": "T" + }, + { + "name": "reserve_space", + "typeAttr": "T" + }, + { + "name": "host_reserved", + "type": 6 + } + ], + "summary": "A RNN backed by cuDNN." + } + }, + { + "name": "Cumprod", + "schema": { + "attributes": [ + { + "default": false, + "description": "If `True`, perform exclusive cumprod.", + "name": "exclusive", + "type": "boolean" + }, + { + "default": false, + "description": "A `bool` (default: False).", + "name": "reverse", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "By default, this op performs an inclusive cumprod, which means that the first\nelement of the input is identical to the first element of the output:\n\n```python\ntf.cumprod([a, b, c]) # => [a, a * b, a * b * c]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumprod is\nperformed instead:\n\n```python\ntf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b]\n```\n\nBy setting the `reverse` kwarg to `True`, the cumprod is performed in the\nopposite direction:\n\n```python\ntf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c]\n```\n\nThis is more efficient than using separate `tf.reverse` 
ops.\n\nThe `reverse` and `exclusive` kwargs can also be combined:\n\n```python\ntf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1]\n```", + "inputs": [ + { + "description": "A `Tensor`. Must be one of the following types: `float32`, `float64`,\n`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n`complex128`, `qint8`, `quint8`, `qint32`, `half`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A `Tensor` of type `int32` (default: 0). Must be in the range\n`[-rank(x), rank(x))`.", + "name": "axis", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Compute the cumulative product of the tensor `x` along `axis`." + } + }, + { + "name": "Cumsum", + "schema": { + "attributes": [ + { + "default": false, + "description": "If `True`, perform exclusive cumsum.", + "name": "exclusive", + "type": "boolean" + }, + { + "default": false, + "description": "A `bool` (default: False).", + "name": "reverse", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "By default, this op performs an inclusive cumsum, which means that the first\nelement of the input is identical to the first element of the output:\n\n```python\ntf.cumsum([a, b, c]) # => [a, a + b, a + b + c]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumsum is\nperformed instead:\n\n```python\ntf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]\n```\n\nBy setting the `reverse` kwarg to `True`, the cumsum is performed in the\nopposite direction:\n\n```python\ntf.cumsum([a, b, c], 
reverse=True) # => [a + b + c, b + c, c]\n```\n\nThis is more efficient than using separate `tf.reverse` ops.\n\nThe `reverse` and `exclusive` kwargs can also be combined:\n\n```python\ntf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]\n```", + "inputs": [ + { + "description": "A `Tensor`. Must be one of the following types: `float32`, `float64`,\n`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,\n`complex128`, `qint8`, `quint8`, `qint32`, `half`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A `Tensor` of type `int32` (default: 0). Must be in the range\n`[-rank(x), rank(x))`.", + "name": "axis", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Compute the cumulative sum of the tensor `x` along `axis`." + } + }, + { + "name": "CumulativeLogsumexp", + "schema": { + "attributes": [ + { + "default": false, + "description": "If `True`, perform exclusive cumulative log-sum-exp.", + "name": "exclusive", + "type": "boolean" + }, + { + "default": false, + "description": "A `bool` (default: False).", + "name": "reverse", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "By default, this op performs an inclusive cumulative log-sum-exp,\nwhich means that the first\nelement of the input is identical to the first element of the output:\n```python\ntf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]\n```\n\nBy setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is\nperformed instead:\n```python\ntf.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) * exp(b))]\n```\nNote that the neutral element 
of the log-sum-exp operation is `-inf`,\nhowever, for performance reasons, the minimal value representable by the\nfloating point type is used instead.\n\nBy setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the\nopposite direction.", + "inputs": [ + { + "description": "A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A `Tensor` of type `int32` (default: 0). Must be in the range\n`[-rank(x), rank(x))`.", + "name": "axis", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Compute the cumulative product of the tensor `x` along `axis`." + } + }, + { + "name": "DataFormatDimMap", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": "NHWC", + "description": "source data format.", + "name": "src_format", + "type": "string" + }, + { + "default": "NCHW", + "description": "destination data format.", + "name": "dst_format", + "type": "string" + } + ], + "description": "the source data format.", + "inputs": [ + { + "description": "A Tensor with each element as a dimension index in source data format.\nMust be in the range [-4, 4).", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A Tensor with each element as a dimension index in destination data format.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns the dimension index in the destination data format given the one in" + } + }, + { + "name": "DataFormatVecPermute", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": "NHWC", + "description": "source data format.", + "name": "src_format", + 
"type": "string" + }, + { + "default": "NCHW", + "description": "destination data format.", + "name": "dst_format", + "type": "string" + } + ], + "description": "one in the source data format.", + "inputs": [ + { + "description": "Vector of size 4 or Tensor of shape (4, 2) in source data format.", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Vector of size 4 or Tensor of shape (4, 2) in destination data format.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns the permuted vector/tensor in the destination data format given the" + } + }, + { + "name": "DataServiceDataset", + "schema": { + "attributes": [ + { + "default": -1, + "name": "task_refresh_interval_hint_ms", + "type": "int64" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "dataset_id", + "type": 9 + }, + { + "name": "processing_mode", + "type": 7 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + }, + { + "name": "job_name", + "type": 7 + }, + { + "name": "max_outstanding_requests", + "type": 9 + }, + { + "name": "iteration_counter", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that reads data from the tf.data service." + } + }, + { + "name": "DatasetCardinality", + "schema": { + "description": "Returns the cardinality of `input_dataset`.", + "inputs": [ + { + "description": "A variant tensor representing the dataset to return cardinality for.", + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "description": "The cardinality of `input_dataset`. Named constants are used to represent\ninfinite and unknown cardinality.", + "name": "cardinality", + "type": 9 + } + ], + "summary": "Returns the cardinality of `input_dataset`." 
+ } + }, + { + "name": "DatasetFromGraph", + "schema": { + "description": "Creates a dataset from the provided `graph_def`.", + "inputs": [ + { + "description": "The graph representation of the dataset (as serialized GraphDef).", + "name": "graph_def", + "type": 7 + } + ], + "outputs": [ + { + "description": "A variant tensor representing the dataset.", + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset from the given `graph_def`." + } + }, + { + "name": "DatasetToGraph", + "schema": { + "attributes": [ + { + "default": [], + "minimum": 0, + "name": "stateful_whitelist", + "type": "string[]" + }, + { + "default": false, + "name": "allow_stateful", + "type": "boolean" + }, + { + "default": false, + "name": "strip_device_assignment", + "type": "boolean" + } + ], + "description": "Returns a graph representation for `input_dataset`.", + "inputs": [ + { + "description": "A variant tensor representing the dataset to return the graph representation for.", + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "description": "The graph representation of the dataset (as serialized GraphDef).", + "name": "graph", + "type": 7 + } + ], + "summary": "Returns a serialized GraphDef representing `input_dataset`." + } + }, + { + "name": "DatasetToGraphV2", + "schema": { + "attributes": [ + { + "default": 0, + "name": "external_state_policy", + "type": "int64" + }, + { + "default": false, + "name": "strip_device_assignment", + "type": "boolean" + } + ], + "description": "Returns a graph representation for `input_dataset`.", + "inputs": [ + { + "description": "A variant tensor representing the dataset to return the graph representation for.", + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "description": "The graph representation of the dataset (as serialized GraphDef).", + "name": "graph", + "type": 7 + } + ], + "summary": "Returns a serialized GraphDef representing `input_dataset`." 
+ } + }, + { + "name": "DatasetToSingleElement", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A handle to a dataset that contains a single element.", + "name": "dataset", + "type": 21 + } + ], + "outputs": [ + { + "description": "The components of the single element of `input`.", + "name": "components", + "typeListAttr": "output_types" + } + ], + "summary": "Outputs the single element from the given dataset." + } + }, + { + "name": "DatasetToTFRecord", + "schema": { + "inputs": [ + { + "description": "A variant tensor representing the dataset to write.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar string tensor representing the filename to use.", + "name": "filename", + "type": 7 + }, + { + "description": "A scalar string tensor containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "name": "compression_type", + "type": 7 + } + ], + "summary": "Writes the given dataset to the given file using the TFRecord format." + } + }, + { + "name": "Dawsn", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + } + }, + { + "name": "DebugGradientIdentity", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "This op is hidden from public in Python. It is used by TensorFlow Debugger to\nregister gradient tensors for gradient debugging.\nThis op operates on non-reference-type tensors.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Identity op for gradient debugging." 
+ } + }, + { + "name": "DebugGradientRefIdentity", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "This op is hidden from public in Python. It is used by TensorFlow Debugger to\nregister gradient tensors for gradient debugging.\nThis op operates on reference-type tensors.", + "inputs": [ + { + "isRef": true, + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "isRef": true, + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Identity op for gradient debugging." + } + }, + { + "name": "DebugIdentity", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "Name of the device on which the tensor resides.", + "name": "device_name", + "type": "string" + }, + { + "default": "", + "description": "Name of the input tensor.", + "name": "tensor_name", + "type": "string" + }, + { + "default": [], + "description": "List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011", + "name": "debug_urls", + "type": "string[]" + }, + { + "default": false, + "description": "Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.", + "name": "gated_grpc", + "type": "boolean" + } + ], + "description": "Provides an identity mapping of the non-Ref type input tensor for debugging.", + "inputs": [ + { + "description": "Input tensor, non-Reference type", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Provides an identity mapping of the non-Ref type input tensor for debugging." 
+ } + }, + { + "name": "DebugIdentityV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "A tfdbg-generated ID for the context that the op belongs to,\n e.g., a concrete compiled tf.function.", + "name": "tfdbg_context_id", + "type": "string" + }, + { + "default": "", + "description": "Optional. Name of the op that the debug op is concerned with.\n Used only for single-tensor trace.", + "name": "op_name", + "type": "string" + }, + { + "default": -1, + "description": "Optional. Output slot index of the tensor that the debug op\n is concerned with. Used only for single-tensor trace.", + "name": "output_slot", + "type": "int64" + }, + { + "default": -1, + "description": "TensorDebugMode enum value. See debug_event.proto for details.", + "name": "tensor_debug_mode", + "type": "int64" + }, + { + "default": [], + "description": "List of URLs to debug targets, e.g., file:///foo/tfdbg_dump.", + "name": "debug_urls", + "type": "string[]" + } + ], + "description": "Provides an identity mapping from input to output, while writing the content of\nthe input tensor by calling DebugEventsWriter.\n\nThe semantics of the input tensor depends on tensor_debug_mode. In typical\nusage, the input tensor comes directly from the user computation only when\ngraph_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a\nlist of all the possible values of graph_debug_mode). For the other debug modes,\nthe input tensor should be produced by an additional op or subgraph that\ncomputes summary information about one or more tensors.", + "inputs": [ + { + "description": "Input tensor, non-Reference type", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Debug Identity V2 Op." 
+ } + }, + { + "name": "DebugNanCount", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "name": "device_name", + "type": "string" + }, + { + "default": "", + "description": "Name of the input tensor.", + "name": "tensor_name", + "type": "string" + }, + { + "default": [], + "description": "List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011.", + "name": "debug_urls", + "type": "string[]" + }, + { + "default": false, + "description": " Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.", + "name": "gated_grpc", + "type": "boolean" + } + ], + "description": "Counts number of NaNs in the input tensor, for debugging.", + "inputs": [ + { + "description": "Input tensor, non-Reference type.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 9 + } + ], + "summary": "Debug NaN Value Counter Op." + } + }, + { + "name": "DebugNumericSummary", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "name": "device_name", + "type": "string" + }, + { + "default": "", + "description": "Name of the input tensor.", + "name": "tensor_name", + "type": "string" + }, + { + "default": [], + "description": "List of URLs to debug targets, e.g.,\n file:///foo/tfdbg_dump, grpc:://localhost:11011.", + "name": "debug_urls", + "type": "string[]" + }, + { + "default": "-NaN", + "description": "(float) The lower bound <= which values will be included in the\n generalized -inf count. 
Default: -inf.", + "name": "lower_bound", + "type": "float32" + }, + { + "default": "NaN", + "description": "(float) The upper bound >= which values will be included in the\n generalized +inf count. Default: +inf.", + "name": "upper_bound", + "type": "float32" + }, + { + "default": false, + "description": "(bool) Do not send data to the debug URLs unless at least one\n of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and\n inf counts) is non-zero.", + "name": "mute_if_healthy", + "type": "boolean" + }, + { + "default": false, + "description": "Whether this op will be gated. If any of the debug_urls of this\n debug node is of the grpc:// scheme, when the value of this attribute is set\n to True, the data will not actually be sent via the grpc stream unless this\n debug op has been enabled at the debug_url. If all of the debug_urls of this\n debug node are of the grpc:// scheme and the debug op is enabled at none of\n them, the output will be an empty Tensor.", + "name": "gated_grpc", + "type": "boolean" + } + ], + "description": "Provide a basic summary of numeric value types, range and distribution.\n\noutput: A double tensor of shape [14 + nDimensions], where nDimensions is the\n number of dimensions of the tensor's shape. The elements of output are:\n [0]: is initialized (1.0) or not (0.0).\n [1]: total number of elements\n [2]: NaN element count\n [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by\n default.\n [4]: negative element count (excluding -inf), if lower_bound is the default\n -inf. Otherwise, this is the count of elements > lower_bound and < 0.\n [5]: zero element count\n [6]: positive element count (excluding +inf), if upper_bound is the default\n +inf. Otherwise, this is the count of elements < upper_bound and > 0.\n [7]: generalized +inf count, elements >= upper_bound. 
upper_bound is +inf by\n default.\nOutput elements [1:8] are all zero, if the tensor is uninitialized.\n [8]: minimum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: +inf.\n [9]: maximum of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: -inf.\n [10]: mean of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [11]: variance of all non-inf and non-NaN elements.\n If uninitialized or no such element exists: NaN.\n [12]: Data type of the tensor encoded as an enum integer. See the DataType\n proto for more details.\n [13]: Number of dimensions of the tensor (ndims).\n [14+]: Sizes of the dimensions.\n", + "inputs": [ + { + "description": "Input tensor, non-Reference type.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 2 + } + ], + "summary": "Debug Numeric Summary Op." + } + }, + { + "name": "DebugNumericSummaryV2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Optional. The type of the output. Can be float32 or float64 (default: float32). Must be one of the following: `float32`, `float64`.", + "name": "output_dtype", + "type": "type" + }, + { + "name": "T", + "type": "type" + }, + { + "default": -1, + "description": "Tensor debug mode: the mode in which the input tensor is summarized\n by the op. See the TensorDebugMode enum in\n tensorflow/core/protobuf/debug_event.proto for details.\n\nSupported values:\n 2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element is a bit which is set to 1 if the input tensor has an\n infinity or nan value, or zero otherwise.\n\n 3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. 
The\n remaining four slots are the total number of elements, -infs,\n +infs, and nans in the input tensor respectively.\n\n 4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element is the device_id, if provided, and -1 otherwise. The 3rd\n element holds the datatype value of the input tensor as according\n to the enumerated type in tensorflow/core/framework/types.proto.\n The remaining elements hold the total number of elements, -infs,\n +infs, nans, negative finite numbers, zeros, and positive finite\n numbers in the input tensor respectively.\n\n 5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element holds the datatype value of the input tensor as according\n to the enumerated type in tensorflow/core/framework/types.proto.\n The 3rd element holds the rank of the tensor. The 4th element holds\n the number of elements within the tensor. Finally the remaining 6\n elements hold the shape of the tensor. If the rank of the tensor\n is lower than 6, the shape is right padded with zeros. If the rank\n is greater than 6, the head of the shape is truncated.\n\n 6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st\n element is the tensor_id, if provided, and -1 otherwise. The 2nd\n element is the device_id, if provided, and -1 otherwise. The 3rd\n element holds the datatype value of the input tensor as according\n to the enumerated type in tensorflow/core/framework/types.proto.\n The 4th element holds the rank of the tensor. The 5th to 11th\n elements hold the shape of the tensor. If the rank of the tensor\n is lower than 6, the shape is right padded with zeros. If the rank\n is greater than 6, the head of the shape is truncated. 
The 12th to\n 18th elements hold the number of elements, -infs, +infs, nans,\n denormal floats, negative finite numbers, zeros, and positive\n finite numbers in the input tensor respectively. The final four\n elements hold the min value, max value, mean, and variance of the\n input tensor.\n\n 8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape\n [3]. The 1st element is -inf if any elements of the input tensor\n is -inf, or zero otherwise. The 2nd element is +inf if any elements\n of the input tensor is +inf, or zero otherwise. The 3rd element is\n nan if any element of the input tensor is nan, or zero otherwise.", + "name": "tensor_debug_mode", + "type": "int64" + }, + { + "default": -1, + "description": "Optional. An integer identifier for the tensor being summarized by this op.", + "name": "tensor_id", + "type": "int64" + } + ], + "description": "Computes a numeric summary of the input tensor. The shape of the output\ndepends on the tensor_debug_mode attribute.\nThis op is used internally by TensorFlow Debugger (tfdbg) v2.", + "inputs": [ + { + "description": "Input tensor, to be summarized by the op.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "output_dtype" + } + ], + "summary": "Debug Numeric Summary V2 Op." 
+ } + }, + { + "name": "DecodeAndCropJpeg", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Number of color channels for the decoded image.", + "name": "channels", + "type": "int64" + }, + { + "default": 1, + "description": "Downscaling ratio.", + "name": "ratio", + "type": "int64" + }, + { + "default": true, + "description": "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only).", + "name": "fancy_upscaling", + "type": "boolean" + }, + { + "default": false, + "description": "If true try to recover an image from truncated input.", + "name": "try_recover_truncated", + "type": "boolean" + }, + { + "default": 1.0, + "description": "The minimum required fraction of lines before a truncated\ninput is accepted.", + "name": "acceptable_fraction", + "type": "float32" + }, + { + "default": "", + "description": "string specifying a hint about the algorithm used for\ndecompression. Defaults to \"\" which maps to a system-specific\ndefault. Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"]. The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)", + "name": "dct_method", + "type": "string" + } + ], + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the JPEG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n\nIf needed, the JPEG-encoded image is transformed to match the requested number\nof color channels.\n\nThe attr `ratio` allows downscaling the image by an integer factor during\ndecoding. Allowed values are: 1, 2, 4, and 8. This is much faster than\ndownscaling the image later.\n\n\nIt is equivalent to a combination of decode and crop, but much faster by only\ndecoding partial jpeg image.", + "inputs": [ + { + "description": "0-D. 
The JPEG-encoded image.", + "name": "contents", + "type": 7 + }, + { + "description": "1-D. The crop window: [crop_y, crop_x, crop_height, crop_width].", + "name": "crop_window", + "type": 3 + } + ], + "outputs": [ + { + "description": "3-D with shape `[height, width, channels]`..", + "name": "image", + "type": 4 + } + ], + "summary": "Decode and Crop a JPEG-encoded image to a uint8 tensor." + } + }, + { + "name": "DecodeBase64", + "schema": { + "description": "Input may or may not have padding at the end. See EncodeBase64 for padding.\nWeb-safe means that input must use - and _ instead of + and /.", + "inputs": [ + { + "description": "Base64 strings to decode.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "Decoded strings.", + "name": "output", + "type": 7 + } + ], + "summary": "Decode web-safe base64-encoded strings." + } + }, + { + "name": "DecodeBmp", + "schema": { + "attributes": [ + { + "default": 0, + "name": "channels", + "type": "int64" + } + ], + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the BMP-encoded image.\n* 3: output an RGB image.\n* 4: output an RGBA image.", + "inputs": [ + { + "description": "0-D. The BMP-encoded image.", + "name": "contents", + "type": 7 + } + ], + "outputs": [ + { + "description": "3-D with shape `[height, width, channels]`. RGB order", + "name": "image", + "type": 4 + } + ], + "summary": "Decode the first frame of a BMP-encoded image to a uint8 tensor." 
+ } + }, + { + "name": "DecodeCSV", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`, `string`.", + "minimum": 1, + "name": "OUT_TYPE", + "type": "type[]" + }, + { + "default": ",", + "description": "char delimiter to separate fields in a record.", + "name": "field_delim", + "type": "string" + }, + { + "default": true, + "description": "If false, treats double quotation marks as regular\ncharacters inside of the string fields (ignoring RFC 4180, Section 2,\nBullet 5).", + "name": "use_quote_delim", + "type": "boolean" + }, + { + "default": "", + "description": "Additional string to recognize as NA/NaN.", + "name": "na_value", + "type": "string" + }, + { + "default": [], + "name": "select_cols", + "type": "int64[]" + } + ], + "description": "RFC 4180 format is expected for the CSV records.\n(https://tools.ietf.org/html/rfc4180)\nNote that we allow leading and trailing spaces with int or float field.", + "inputs": [ + { + "description": "Each string is a record/row in the csv and all records should have\nthe same format.", + "name": "records", + "type": 7 + }, + { + "description": "One tensor per column of the input record, with either a\nscalar default value for that column or an empty vector if the column is\nrequired.", + "name": "record_defaults", + "typeListAttr": "OUT_TYPE" + } + ], + "outputs": [ + { + "description": "Each tensor will have the same shape as records.", + "name": "output", + "typeListAttr": "OUT_TYPE" + } + ], + "summary": "Convert CSV records to tensors. Each column maps to one tensor." 
+ } + }, + { + "name": "DecodeCompressed", + "schema": { + "attributes": [ + { + "default": "", + "description": "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "name": "compression_type", + "type": "string" + } + ], + "description": "This op decompresses each element of the `bytes` input `Tensor`, which\nis assumed to be compressed using the given `compression_type`.\n\nThe `output` is a string `Tensor` of the same shape as `bytes`,\neach element containing the decompressed data from the corresponding\nelement in `bytes`.", + "inputs": [ + { + "description": "A Tensor of string which is compressed.", + "name": "bytes", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor with the same shape as input `bytes`, uncompressed\nfrom bytes.", + "name": "output", + "type": 7 + } + ], + "summary": "Decompress strings." + } + }, + { + "name": "DecodeGif", + "schema": { + "description": "GIF images with frame or transparency compression are not supported.\nOn Linux and MacOS systems, convert animated GIFs from compressed to\nuncompressed by running:\n\n convert $src.gif -coalesce $dst.gif\n\nThis op also supports decoding JPEGs and PNGs, though it is cleaner to use\n`tf.io.decode_image`.", + "inputs": [ + { + "description": "0-D. The GIF-encoded image.", + "name": "contents", + "type": 7 + } + ], + "outputs": [ + { + "description": "4-D with shape `[num_frames, height, width, 3]`. RGB channel order.", + "name": "image", + "type": 4 + } + ], + "summary": "Decode the frame(s) of a GIF-encoded image to a uint8 tensor." + } + }, + { + "name": "DecodeJSONExample", + "schema": { + "description": "This op translates a tensor containing Example records, encoded using\nthe [standard JSON\nmapping](https://developers.google.com/protocol-buffers/docs/proto3#json),\ninto a tensor containing the same records encoded as binary protocol\nbuffers. 
The resulting tensor can then be fed to any of the other\nExample-parsing ops.", + "inputs": [ + { + "description": "Each string is a JSON object serialized according to the JSON\nmapping of the Example proto.", + "name": "json_examples", + "type": 7 + } + ], + "outputs": [ + { + "description": "Each string is a binary Example protocol buffer corresponding\nto the respective element of `json_examples`.", + "name": "binary_examples", + "type": 7 + } + ], + "summary": "Convert JSON-encoded Example records to binary protocol buffer strings." + } + }, + { + "name": "DecodeJpeg", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Number of color channels for the decoded image.", + "name": "channels", + "type": "int64" + }, + { + "default": 1, + "description": "Downscaling ratio.", + "name": "ratio", + "type": "int64" + }, + { + "default": true, + "description": "If true use a slower but nicer upscaling of the\nchroma planes (yuv420/422 only).", + "name": "fancy_upscaling", + "type": "boolean" + }, + { + "default": false, + "description": "If true try to recover an image from truncated input.", + "name": "try_recover_truncated", + "type": "boolean" + }, + { + "default": 1.0, + "description": "The minimum required fraction of lines before a truncated\ninput is accepted.", + "name": "acceptable_fraction", + "type": "float32" + }, + { + "default": "", + "description": "string specifying a hint about the algorithm used for\ndecompression. Defaults to \"\" which maps to a system-specific\ndefault. Currently valid values are [\"INTEGER_FAST\",\n\"INTEGER_ACCURATE\"]. 
The hint may be ignored (e.g., the internal\njpeg library changes to a version that does not have that specific\noption.)", + "name": "dct_method", + "type": "string" + } + ], + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the JPEG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n\nIf needed, the JPEG-encoded image is transformed to match the requested number\nof color channels.\n\nThe attr `ratio` allows downscaling the image by an integer factor during\ndecoding. Allowed values are: 1, 2, 4, and 8. This is much faster than\ndownscaling the image later.\n\n\nThis op also supports decoding PNGs and non-animated GIFs since the interface is\nthe same, though it is cleaner to use `tf.io.decode_image`.", + "inputs": [ + { + "description": "0-D. The JPEG-encoded image.", + "name": "contents", + "type": 7 + } + ], + "outputs": [ + { + "description": "3-D with shape `[height, width, channels]`..", + "name": "image", + "type": 4 + } + ], + "summary": "Decode a JPEG-encoded image to a uint8 tensor." + } + }, + { + "name": "DecodePaddedRaw", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`.", + "name": "out_type", + "type": "type" + }, + { + "default": true, + "description": "Whether the input `input_bytes` is in little-endian order. Ignored for\n`out_type` values that are stored in a single byte, like `uint8`", + "name": "little_endian", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Tensor of string to be decoded.", + "name": "input_bytes", + "type": 7 + }, + { + "description": "Length in bytes for each element of the decoded output. 
Must be a multiple\nof the size of the output type.", + "name": "fixed_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A Tensor with one more dimension than the input `bytes`. The added dimension\nwill have size equal to the length of the elements of `bytes` divided by the\nnumber of bytes to represent `out_type`.", + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Reinterpret the bytes of a string as a vector of numbers." + } + }, + { + "name": "DecodePng", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Number of color channels for the decoded image.", + "name": "channels", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 4 + }, + "description": "Must be one of the following: `uint8`, `uint16`.", + "name": "dtype", + "type": "type" + } + ], + "description": "The attr `channels` indicates the desired number of color channels for the\ndecoded image.\n\nAccepted values are:\n\n* 0: Use the number of channels in the PNG-encoded image.\n* 1: output a grayscale image.\n* 3: output an RGB image.\n* 4: output an RGBA image.\n\nIf needed, the PNG-encoded image is transformed to match the requested number\nof color channels.\n\nThis op also supports decoding JPEGs and non-animated GIFs since the interface\nis the same, though it is cleaner to use `tf.io.decode_image`.", + "inputs": [ + { + "description": "0-D. The PNG-encoded image.", + "name": "contents", + "type": 7 + } + ], + "outputs": [ + { + "description": "3-D with shape `[height, width, channels]`.", + "name": "image", + "typeAttr": "dtype" + } + ], + "summary": "Decode a PNG-encoded image to a uint8 or uint16 tensor." + } + }, + { + "name": "DecodeProtoV2", + "schema": { + "attributes": [ + { + "description": "Name of the proto message type to decode.", + "name": "message_type", + "type": "string" + }, + { + "description": "List of strings containing proto field names. An extension field can be decoded\nby using its full name, e.g. 
EXT_PACKAGE.EXT_FIELD_NAME.", + "name": "field_names", + "type": "string[]" + }, + { + "description": "List of TF types to use for the respective field in field_names.", + "minimum": 0, + "name": "output_types", + "type": "type[]" + }, + { + "default": "local://", + "description": "Either the special value `local://` or a path to a file containing\na serialized `FileDescriptorSet`.", + "name": "descriptor_source", + "type": "string" + }, + { + "default": "binary", + "description": "Either `binary` or `text`.", + "name": "message_format", + "type": "string" + }, + { + "default": false, + "description": "Whether to sanitize the result or not.", + "name": "sanitize", + "type": "boolean" + } + ], + "description": "The `decode_proto` op extracts fields from a serialized protocol buffers\nmessage into tensors. The fields in `field_names` are decoded and converted\nto the corresponding `output_types` if possible.\n\nA `message_type` name must be provided to give context for the field names.\nThe actual message descriptor can be looked up either in the linked-in\ndescriptor pool or a filename provided by the caller using the\n`descriptor_source` attribute.\n\nEach output tensor is a dense tensor. This means that it is padded to hold\nthe largest number of repeated elements seen in the input minibatch. (The\nshape is also padded by one to prevent zero-sized dimensions). The actual\nrepeat counts for each example in the minibatch can be found in the `sizes`\noutput. In many cases the output of `decode_proto` is fed immediately into\ntf.squeeze if missing values are not a concern. When using tf.squeeze, always\npass the squeeze dimension explicitly to avoid surprises.\n\nFor the most part, the mapping between Proto field types and TensorFlow dtypes\nis straightforward. However, there are a few special cases:\n\n- A proto field that contains a submessage or group can only be converted\nto `DT_STRING` (the serialized submessage). This is to reduce the complexity\nof the API. 
The resulting string can be used as input to another instance of\nthe decode_proto op.\n\n- TensorFlow lacks support for unsigned integers. The ops represent uint64\ntypes as a `DT_INT64` with the same twos-complement bit pattern (the obvious\nway). Unsigned int32 values can be represented exactly by specifying type\n`DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in\nthe `output_types` attribute.\n\nBoth binary and text proto serializations are supported, and can be\nchosen using the `format` attribute.\n\nThe `descriptor_source` attribute selects the source of protocol\ndescriptors to consult when looking up `message_type`. This may be:\n\n- An empty string or \"local://\", in which case protocol descriptors are\ncreated for C++ (not Python) proto definitions linked to the binary.\n\n- A file, in which case protocol descriptors are created from the file,\nwhich is expected to contain a `FileDescriptorSet` serialized as a string.\nNOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`\nand `--include_imports` options to the protocol compiler `protoc`.\n\n- A \"bytes://\", in which protocol descriptors are created from ``,\nwhich is expected to be a `FileDescriptorSet` serialized as a string.", + "inputs": [ + { + "description": "Tensor of serialized protos with shape `batch_shape`.", + "name": "bytes", + "type": 7 + } + ], + "outputs": [ + { + "description": "Tensor of int32 with shape `[batch_shape, len(field_names)]`.\nEach entry is the number of values found for the corresponding field.\nOptional fields may have 0 or 1 values.", + "name": "sizes", + "type": 3 + }, + { + "description": "List of tensors containing values for the corresponding field.\n`values[i]` has datatype `output_types[i]`\nand shape `[batch_shape, max(sizes[...,i])]`.", + "name": "values", + "typeListAttr": "output_types" + } + ], + "summary": "The op extracts fields from a serialized protocol buffers message into tensors." 
+ } + }, + { + "name": "DecodeRaw", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`, `complex64`, `complex128`, `bool`.", + "name": "out_type", + "type": "type" + }, + { + "default": true, + "description": "Whether the input `bytes` are in little-endian order.\nIgnored for `out_type` values that are stored in a single byte like\n`uint8`.", + "name": "little_endian", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "All the elements must have the same length.", + "name": "bytes", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor with one more dimension than the input `bytes`. The\nadded dimension will have size equal to the length of the elements\nof `bytes` divided by the number of bytes to represent `out_type`.", + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Reinterpret the bytes of a string as a vector of numbers." + } + }, + { + "name": "DecodeWav", + "schema": { + "attributes": [ + { + "default": -1, + "description": "Number of sample channels wanted.", + "name": "desired_channels", + "type": "int64" + }, + { + "default": -1, + "description": "Length of audio requested.", + "name": "desired_samples", + "type": "int64" + } + ], + "description": "The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.\n\nWhen desired_channels is set, if the input contains fewer channels than this\nthen the last channel will be duplicated to give the requested number, else if\nthe input has more channels than requested then the additional channels will be\nignored.\n\nIf desired_samples is set, then the audio will be cropped or padded with zeroes\nto the requested length.\n\nThe first output contains a Tensor with the content of the audio samples. The\nlowest dimension will be the number of channels, and the second will be the\nnumber of samples. 
For example, a ten-sample-long stereo WAV file should give an\noutput shape of [10, 2].", + "inputs": [ + { + "description": "The WAV-encoded audio, usually from a file.", + "name": "contents", + "type": 7 + } + ], + "outputs": [ + { + "description": "2-D with shape `[length, channels]`.", + "name": "audio", + "type": 1 + }, + { + "description": "Scalar holding the sample rate found in the WAV header.", + "name": "sample_rate", + "type": 3 + } + ], + "summary": "Decode a 16-bit PCM WAV file to a float tensor." + } + }, + { + "name": "DeepCopy", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The source tensor of type `T`.", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": " y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y`\n is not an alias of `x`.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Makes a copy of `x`." + } + }, + { + "name": "DeleteIterator", + "schema": { + "inputs": [ + { + "description": "A handle to the iterator to delete.", + "name": "handle", + "type": 20 + }, + { + "description": "A variant deleter.", + "name": "deleter", + "type": 21 + } + ], + "summary": "A container for an iterator resource." + } + }, + { + "name": "DeleteMemoryCache", + "schema": { + "inputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + } + }, + { + "name": "DeleteMultiDeviceIterator", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "N", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A handle to the multi device iterator to delete.", + "name": "multi_device_iterator", + "type": 20 + }, + { + "description": "A list of iterator handles (unused). 
This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.", + "name": "iterators", + "numberAttr": "N", + "type": 20 + }, + { + "description": "A variant deleter.", + "name": "deleter", + "type": 21 + } + ], + "summary": "A container for an iterator resource." + } + }, + { + "name": "DeleteRandomSeedGenerator", + "schema": { + "inputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + } + }, + { + "name": "DeleteSeedGenerator", + "schema": { + "inputs": [ + { + "name": "handle", + "type": 20 + }, + { + "name": "deleter", + "type": 21 + } + ] + } + }, + { + "name": "DeleteSessionTensor", + "schema": { + "inputs": [ + { + "description": "The handle for a tensor stored in the session state.", + "name": "handle", + "type": 7 + } + ], + "summary": "Delete the tensor specified by its handle in the session." + } + }, + { + "name": "DenseBincount", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "bool; Whether the kernel should count the appearance or number of occurrences.", + "name": "binary_output", + "type": "boolean" + } + ], + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "inputs": [ + { + "description": "1D or 2D int `Tensor`.", + "name": "input", + "typeAttr": "Tidx" + }, + { + "description": "non-negative int scalar `Tensor`.", + "name": "size", + "typeAttr": "Tidx" + }, + { + "description": "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "name": "weights", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].\nThe counts or summed weights for each value in the range [0, size).", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Counts the number of occurrences of each value in an integer array." + } + }, + { + "name": "DenseCountSparseOutput", + "schema": { + "attributes": [ + { + "description": "Dtype of the input values tensor. Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": -1, + "description": "Minimum value to count. Can be set to -1 for no minimum.", + "minimum": -1, + "name": "minlength", + "type": "int64" + }, + { + "default": -1, + "description": "Maximum value to count. Can be set to -1 for no maximum.", + "minimum": -1, + "name": "maxlength", + "type": "int64" + }, + { + "description": "Whether to output the number of occurrences of each value or 1.", + "name": "binary_output", + "type": "boolean" + }, + { + "description": "Dtype of the output values tensor. 
Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "output_type", + "type": "type" + } + ], + "description": " Counts the number of times each value occurs in the input.", + "inputs": [ + { + "description": "Tensor containing data to count.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "A Tensor of the same shape as indices containing per-index weight values. May\nalso be the empty tensor if no weights are used.", + "name": "weights", + "typeAttr": "output_type" + } + ], + "outputs": [ + { + "description": "Indices tensor for the resulting sparse tensor object.", + "name": "output_indices", + "type": 9 + }, + { + "description": "Values tensor for the resulting sparse tensor object.", + "name": "output_values", + "typeAttr": "output_type" + }, + { + "description": "Shape tensor for the resulting sparse tensor object.", + "name": "output_dense_shape", + "type": 9 + } + ], + "summary": "Performs sparse-output bin counting for a tf.tensor input." + } + }, + { + "name": "DenseToCSRSparseMatrix", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "A Dense tensor.", + "name": "dense_input", + "typeAttr": "T" + }, + { + "description": "Indices of nonzero elements.", + "name": "indices", + "type": 9 + } + ], + "outputs": [ + { + "description": "A (possibly batched) CSRSparseMatrix.", + "name": "sparse_output", + "type": 21 + } + ], + "summary": "Converts a dense tensor to a (possibly batched) CSRSparseMatrix." 
+ } + }, + { + "name": "DenseToDenseSetOperation", + "schema": { + "attributes": [ + { + "name": "set_operation", + "type": "string" + }, + { + "default": true, + "name": "validate_indices", + "type": "boolean" + }, + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.", + "name": "T", + "type": "type" + } + ], + "description": "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`.", + "inputs": [ + { + "description": "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored.", + "name": "set1", + "typeAttr": "T" + }, + { + "description": "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.\nDimension `n` contains values in a set, duplicates are allowed but ignored.", + "name": "set2", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "2D indices of a `SparseTensor`.", + "name": "result_indices", + "type": 9 + }, + { + "description": "1D values of a `SparseTensor`.", + "name": "result_values", + "typeAttr": "T" + }, + { + "description": "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions.", + "name": "result_shape", + "type": 9 + } + ], + "summary": "Applies set operation along last dimension of 2 `Tensor` inputs." 
+ } + }, + { + "name": "DenseToSparseBatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A handle to an input dataset. Must have a single component.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "name": "batch_size", + "type": 9 + }, + { + "description": "A vector representing the dense shape of each row in the produced\nSparseTensor. The shape may be partially specified, using `-1` to indicate\nthat a particular dimension should use the maximum size of all batch elements.", + "name": "row_shape", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that batches input elements into a SparseTensor." + } + }, + { + "name": "DenseToSparseSetOperation", + "schema": { + "attributes": [ + { + "name": "set_operation", + "type": "string" + }, + { + "default": true, + "name": "validate_indices", + "type": "boolean" + }, + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.", + "name": "T", + "type": "type" + } + ], + "description": "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nInput `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\nand `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\nas `set1`. Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nIf `validate_indices` is `True`, this op validates the order and range of `set2`\nindices.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. 
The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`.", + "inputs": [ + { + "description": "`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.\nDimension `n` contains values in a set, duplicates are allowed but ignored.", + "name": "set1", + "typeAttr": "T" + }, + { + "description": "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder.", + "name": "set2_indices", + "type": 9 + }, + { + "description": "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder.", + "name": "set2_values", + "typeAttr": "T" + }, + { + "description": "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the\nmax set size across `n-1` dimensions.", + "name": "set2_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "2D indices of a `SparseTensor`.", + "name": "result_indices", + "type": 9 + }, + { + "description": "1D values of a `SparseTensor`.", + "name": "result_values", + "typeAttr": "T" + }, + { + "description": "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions.", + "name": "result_shape", + "type": 9 + } + ], + "summary": "Applies set operation along last dimension of `Tensor` and `SparseTensor`." 
+ } + }, + { + "name": "DepthToSpace", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "The size of the spatial block, same as in Space2Depth.", + "minimum": 2, + "name": "block_size", + "type": "int64" + }, + { + "default": "NHWC", + "description": "Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "name": "data_format", + "type": "string" + } + ], + "description": "Rearranges data from depth into blocks of spatial data.\nThis is the reverse transformation of SpaceToDepth. More specifically,\nthis op outputs a copy of the input tensor where values from the `depth`\ndimension are moved in spatial blocks to the `height` and `width` dimensions.\nThe attr `block_size` indicates the input block size and how the data is moved.\n\n * Chunks of data of size `block_size * block_size` from depth are rearranged\n into non-overlapping blocks of size `block_size x block_size`\n * The width the output tensor is `input_depth * block_size`, whereas the\n height is `input_height * block_size`.\n * The Y, X coordinates within each block of the output image are determined\n by the high order component of the input channel index.\n * The depth of the input tensor must be divisible by\n `block_size * block_size`.\n\nThe `data_format` attr specifies the layout of the input and output tensors\nwith the following options:\n \"NHWC\": `[ batch, height, width, channels ]`\n \"NCHW\": `[ batch, channels, height, width ]`\n \"NCHW_VECT_C\":\n `qint8 [ batch, channels / 4, height, width, 4 ]`\n\nIt is useful to consider the operation as transforming a 6-D Tensor.\ne.g. 
for data_format = NHWC,\n Each element in the input tensor can be specified via 6 coordinates,\n ordered by decreasing memory layout significance as:\n n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates\n within the input image, bX, bY means coordinates\n within the output block, oC means output channels).\n The output would be the input transposed to the following layout:\n n,iY,bY,iX,bX,oC\n\nThis operation is useful for resizing the activations between convolutions\n(but keeping all data), e.g. instead of pooling. It is also useful for training\npurely convolutional models.\n\nFor example, given an input of shape `[1, 1, 1, 4]`, data_format = \"NHWC\" and\nblock_size = 2:\n\n```\nx = [[[[1, 2, 3, 4]]]]\n\n```\n\nThis operation will output a tensor of shape `[1, 2, 2, 1]`:\n\n```\n [[[[1], [2]],\n [[3], [4]]]]\n```\n\nHere, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,\nthe corresponding output will have 2x2 elements and will have a depth of\n1 channel (1 = `4 / (block_size * block_size)`).\nThe output element shape is `[2, 2, 1]`.\n\nFor an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.\n\n```\nx = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\n```\n\nThis operation, for block size of 2, will return the following tensor of shape\n`[1, 2, 2, 3]`\n\n```\n [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n\n```\n\nSimilarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:\n\n```\nx = [[[[1, 2, 3, 4],\n [5, 6, 7, 8]],\n [[9, 10, 11, 12],\n [13, 14, 15, 16]]]]\n```\n\nthe operator will return the following tensor of shape `[1 4 4 1]`:\n\n```\nx = [[[ [1], [2], [5], [6]],\n [ [3], [4], [7], [8]],\n [ [9], [10], [13], [14]],\n [ [11], [12], [15], [16]]]]\n\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "DepthToSpace for tensors of type T." 
+ } + }, + { + "name": "DepthwiseConv2dNative", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D of length 4. The stride of the sliding window for each dimension\nof `input`.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`.", + "name": "padding", + "type": "string" + }, + { + "default": [], + "name": "explicit_paddings", + "type": "int64[]" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "category": "Layer", + "description": "Given an input tensor of shape `[batch, in_height, in_width, in_channels]`\nand a filter / kernel tensor of shape\n`[filter_height, filter_width, in_channels, channel_multiplier]`, containing\n`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies\na different filter to each input channel (expanding from 1 channel to\n`channel_multiplier` channels for each), then concatenates the results\ntogether. 
Thus, the output has `in_channels * channel_multiplier` channels.\n\n```\nfor k in 0..in_channels-1\n for q in 0..channel_multiplier-1\n output[b, i, j, k * channel_multiplier + q] =\n sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *\n filter[di, dj, k, q]\n```\n\nMust have `strides[0] = strides[3] = 1`. For the most common case of the same\nhorizontal and vertices strides, `strides = [1, stride, stride, 1]`.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors." + } + }, + { + "name": "DepthwiseConv2dNativeBackpropFilter", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "The stride of the sliding window for each dimension of the input\nof the convolution.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`.", + "name": "padding", + "type": "string" + }, + { + "default": [], + "name": "explicit_paddings", + "type": "int64[]" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. 
The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "4-D with shape based on `data_format`. For example, if\n`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,\nin_width, in_channels]` tensor.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "An integer vector representing the tensor shape of `filter`,\nwhere `filter` is a 4-D\n`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.", + "name": "filter_sizes", + "type": 3 + }, + { + "description": "4-D with shape based on `data_format`.\nFor example, if `data_format` is 'NHWC' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.\nthe `filter` input of the convolution.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of depthwise convolution with respect to the filter." + } + }, + { + "name": "DepthwiseConv2dNativeBackpropInput", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "The stride of the sliding window for each dimension of the input\nof the convolution.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`, `EXPLICIT`.", + "name": "padding", + "type": "string" + }, + { + "default": [], + "name": "explicit_paddings", + "type": "int64[]" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. 
With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, height, width, channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, channels, height, width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each filter\nelement on that dimension. The dimension order is determined by the value of\n`data_format`, see above for details. Dilations in the batch and depth\ndimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "An integer vector representing the shape of `input`, based\non `data_format`. For example, if `data_format` is 'NHWC' then\n `input` is a 4-D `[batch, height, width, channels]` tensor.", + "name": "input_sizes", + "type": 3 + }, + { + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, depthwise_multiplier]`.", + "name": "filter", + "typeAttr": "T" + }, + { + "description": "4-D with shape based on `data_format`.\nFor example, if `data_format` is 'NHWC' then\nout_backprop shape is `[batch, out_height, out_width, out_channels]`.\nGradients w.r.t. the output of the convolution.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape according to `data_format`. For example, if\n`data_format` is 'NHWC', output shape is `[batch, in_height,\nin_width, in_channels]`. Gradient w.r.t. the input of the\nconvolution.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradients of depthwise convolution with respect to the input." 
+ } + }, + { + "name": "Dequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T", + "type": "type" + }, + { + "default": "MIN_COMBINED", + "description": "Must be one of the following: `MIN_COMBINED`, `MIN_FIRST`, `SCALED`.", + "name": "mode", + "type": "string" + }, + { + "default": false, + "name": "narrow_range", + "type": "boolean" + }, + { + "default": -1, + "name": "axis", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Type of the output tensor. Currently Dequantize supports float and bfloat16.\nIf 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. Must be one of the following: `bfloat16`, `float32`.", + "name": "dtype", + "type": "type" + } + ], + "category": "Tensor", + "description": "[min_range, max_range] are scalar floats that specify the range for\nthe output. The 'mode' attribute controls exactly which calculations are\nused to convert the float values to their quantized equivalents.\n\nIn 'MIN_COMBINED' mode, each value of the tensor will undergo the following:\n\n```\nif T == qint8: in[i] += (range(T) + 1)/ 2.0\nout[i] = min_range + (in[i]* (max_range - min_range) / range(T))\n```\nhere `range(T) = numeric_limits::max() - numeric_limits::min()`\n\n*MIN_COMBINED Mode Example*\n\nIf the input comes from a QuantizedRelu6, the output type is\nquint8 (range of 0-255) but the possible range of QuantizedRelu6 is\n0-6. 
The min_range and max_range values are therefore 0.0 and 6.0.\nDequantize on quint8 will take each value, cast to float, and multiply\nby 6 / 255.\nNote that if quantizedtype is qint8, the operation will additionally add\neach value by 128 prior to casting.\n\nIf the mode is 'MIN_FIRST', then this approach is used:\n\n```c++\nnum_discrete_values = 1 << (# of bits in T)\nrange_adjust = num_discrete_values / (num_discrete_values - 1)\nrange = (range_max - range_min) * range_adjust\nrange_scale = range / num_discrete_values\nconst double offset_input = static_cast(input) - lowest_quantized;\nresult = range_min + ((input - numeric_limits::min()) * range_scale)\n```\n\nIf the mode is `SCALED`, dequantization is performed by multiplying each\ninput value by a scaling_factor. (Thus an input of 0 always maps to 0.0).\n\nThe scaling_factor is determined from `min_range`, `max_range`, and\n`narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`\nand `QuantizeV2`, using the following algorithm:\n\n```c++\n\n const int min_expected_T = std::numeric_limits::min() +\n (narrow_range ? 1 : 0);\n const int max_expected_T = std::numeric_limits::max();\n const float max_expected_T = std::numeric_limits::max();\n\n const float scale_factor =\n (std::numeric_limits::min() == 0) ? (max_range / max_expected_T)\n : std::max(min_range / min_expected_T,\n max_range / max_expected_T);\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "description": "The minimum scalar value possibly produced for the input.", + "name": "min_range", + "type": 1 + }, + { + "description": "The maximum scalar value possibly produced for the input.", + "name": "max_range", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Dequantize the 'input' tensor into a float or bfloat16 Tensor." 
+ } + }, + { + "name": "DeserializeIterator", + "schema": { + "inputs": [ + { + "description": "A handle to an iterator resource.", + "name": "resource_handle", + "type": 20 + }, + { + "description": "A variant tensor storing the state of the iterator contained in the\nresource.", + "name": "serialized", + "type": 21 + } + ], + "summary": "Converts the given variant tensor to an iterator and stores it in the given resource." + } + }, + { + "name": "DeserializeManySparse", + "schema": { + "attributes": [ + { + "description": "The `dtype` of the serialized `SparseTensor` objects.", + "name": "dtype", + "type": "type" + } + ], + "description": "The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where\n`N` is the minibatch size and the rows correspond to packed outputs of\n`SerializeSparse`. The ranks of the original `SparseTensor` objects\nmust all match. When the final `SparseTensor` is created, it has rank one\nhigher than the ranks of the incoming `SparseTensor` objects\n(they have been concatenated along a new row dimension).\n\nThe output `SparseTensor` object's shape values for all dimensions but the\nfirst are the max across the input `SparseTensor` objects' shape values\nfor the corresponding dimensions. Its first shape value is `N`, the minibatch\nsize.\n\nThe input `SparseTensor` objects' indices are assumed ordered in\nstandard lexicographic order. 
If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the serialized input is a `[2 x 3]` matrix representing two\noriginal `SparseTensor` objects:\n\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n\nand\n\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n\nthen the final deserialized `SparseTensor` will be:\n\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]", + "inputs": [ + { + "description": "2-D, The `N` serialized `SparseTensor` objects.\nMust have 3 columns.", + "name": "serialized_sparse", + "type": 7 + } + ], + "outputs": [ + { + "name": "sparse_indices", + "type": 9 + }, + { + "name": "sparse_values", + "typeAttr": "dtype" + }, + { + "name": "sparse_shape", + "type": 9 + } + ], + "summary": "Deserialize and concatenate `SparseTensors` from a serialized minibatch." + } + }, + { + "name": "DeserializeSparse", + "schema": { + "attributes": [ + { + "description": "The `dtype` of the serialized `SparseTensor` objects.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 7 + }, + "description": "Must be one of the following: `string`, `variant`.", + "name": "Tserialized", + "type": "type" + } + ], + "description": "The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where\nthe last dimension stores serialized `SparseTensor` objects and the other N\ndimensions (N >= 0) correspond to a batch. The ranks of the original\n`SparseTensor` objects must all match. When the final `SparseTensor` is\ncreated, its rank is the rank of the incoming `SparseTensor` objects plus N;\nthe sparse tensors have been concatenated along new dimensions, one for each\nbatch.\n\nThe output `SparseTensor` object's shape values for the original dimensions\nare the max across the input `SparseTensor` objects' shape values for the\ncorresponding dimensions. 
The new dimensions match the size of the batch.\n\nThe input `SparseTensor` objects' indices are assumed ordered in\nstandard lexicographic order. If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the serialized input is a `[2 x 3]` matrix representing two\noriginal `SparseTensor` objects:\n\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n\nand\n\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n\nthen the final deserialized `SparseTensor` will be:\n\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]", + "inputs": [ + { + "description": "The serialized `SparseTensor` objects. The last dimension\nmust have 3 columns.", + "name": "serialized_sparse", + "typeAttr": "Tserialized" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "type": 9 + }, + { + "name": "sparse_values", + "typeAttr": "dtype" + }, + { + "name": "sparse_shape", + "type": 9 + } + ], + "summary": "Deserialize `SparseTensor` objects." + } + }, + { + "name": "DestroyResourceOp", + "schema": { + "attributes": [ + { + "default": true, + "description": "whether to ignore the error when the resource\ndoesn't exist.", + "name": "ignore_lookup_error", + "type": "boolean" + } + ], + "description": "All subsequent operations using the resource will result in a NotFound\nerror status.", + "inputs": [ + { + "description": "handle to the resource to delete.", + "name": "resource", + "type": 20 + } + ], + "summary": "Deletes the resource specified by the handle." 
+ } + }, + { + "name": "DestroyTemporaryVariable", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Name of the temporary variable, usually the name of the matching\n'TemporaryVariable' op.", + "name": "var_name", + "type": "string" + } + ], + "description": "Sets output to the value of the Tensor pointed to by 'ref', then destroys\nthe temporary variable called 'var_name'.\nAll other uses of 'ref' *must* have executed before this op.\nThis is typically achieved by chaining the ref through each assign op, or by\nusing control dependencies.\n\nOutputs the final value of the tensor pointed to by 'ref'.", + "inputs": [ + { + "description": "A reference to the temporary variable tensor.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "T" + } + ], + "summary": "Destroys the temporary variable and returns its final value." + } + }, + { + "name": "Diag", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Given a `diagonal`, this operation returns a tensor with the `diagonal` and\neverything else padded with zeros. The diagonal is computed as follows:\n\nAssume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of\nrank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:\n\n`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.\n\nFor example:\n\n```\n# 'diagonal' is [1, 2, 3, 4]\ntf.diag(diagonal) ==> [[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]]\n```", + "inputs": [ + { + "description": "Rank k tensor where k is at most 1.", + "name": "diagonal", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a diagonal tensor with a given diagonal values." 
+ } + }, + { + "name": "DiagPart", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "This operation returns a tensor with the `diagonal` part\nof the `input`. The `diagonal` part is computed as follows:\n\nAssume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a\ntensor of rank `k` with dimensions `[D1,..., Dk]` where:\n\n`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.\n\nFor example:\n\n```\n# 'input' is [[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]]\n\ntf.diag_part(input) ==> [1, 2, 3, 4]\n```", + "inputs": [ + { + "description": "Rank k tensor where k is even and not zero.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The extracted diagonal.", + "name": "diagonal", + "typeAttr": "T" + } + ], + "summary": "Returns the diagonal part of the tensor." + } + }, + { + "name": "Digamma", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "`Gamma(x)`), element-wise.", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes Psi, the derivative of Lgamma (the log of the absolute value of" + } + }, + { + "name": "Dilation2D", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "The stride of the sliding window for each dimension of the input\ntensor. 
Must be: `[1, stride_height, stride_width, 1]`.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The input stride for atrous morphological dilation. Must be:\n`[1, rate_height, rate_width, 1]`.", + "minimum": 4, + "name": "rates", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "description": "The `input` tensor has shape `[batch, in_height, in_width, depth]` and the\n`filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each\ninput channel is processed independently of the others with its own structuring\nfunction. The `output` tensor has shape\n`[batch, out_height, out_width, depth]`. The spatial dimensions of the output\ntensor depend on the `padding` algorithm. We currently only support the default\n\"NHWC\" `data_format`.\n\nIn detail, the grayscale morphological 2-D dilation is the max-sum correlation\n(for consistency with `conv2d`, we use unmirrored filters):\n\n output[b, y, x, c] =\n max_{dy, dx} input[b,\n strides[1] * y + rates[1] * dy,\n strides[2] * x + rates[2] * dx,\n c] +\n filter[dy, dx, c]\n\nMax-pooling is a special case when the filter has size equal to the pooling\nkernel size and contains all zeros.\n\nNote on duality: The dilation of `input` by the `filter` is equal to the\nnegation of the erosion of `-input` by the reflected `filter`.", + "inputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape `[batch, out_height, out_width, depth]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors." 
+ } + }, + { + "name": "Dilation2DBackpropFilter", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. Must be: `[1, stride_height, stride_width, 1]`.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`.", + "minimum": 4, + "name": "rates", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "name": "filter", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, out_height, out_width, depth]`.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "name": "filter_backprop", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of morphological 2-D dilation with respect to the filter." + } + }, + { + "name": "Dilation2DBackpropInput", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "1-D of length 4. The stride of the sliding window for each dimension of\nthe input tensor. 
Must be: `[1, stride_height, stride_width, 1]`.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "1-D of length 4. The input stride for atrous morphological dilation.\nMust be: `[1, rate_height, rate_width, 1]`.", + "minimum": 4, + "name": "rates", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[filter_height, filter_width, depth]`.", + "name": "filter", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, out_height, out_width, depth]`.", + "name": "out_backprop", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, depth]`.", + "name": "in_backprop", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of morphological 2-D dilation with respect to the input." + } + }, + { + "name": "DirectedInterleaveDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A dataset of scalar `DT_INT64` elements that determines which of the\n`N` data inputs should produce the next output element.", + "name": "selector_input_dataset", + "type": 21 + }, + { + "description": "`N` datasets with the same type that will be interleaved according to\nthe values of `selector_input_dataset`.", + "name": "data_input_datasets", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "A substitute for `InterleaveDataset` on a fixed list of `N` datasets." 
+ } + }, + { + "name": "Div", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Div` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x / y element-wise." + } + }, + { + "name": "DivNoNan", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "\n*NOTE*: `DivNoNan` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns 0 if the denominator is zero." + } + }, + { + "name": "DrawBoundingBoxes", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float16`.", + "name": "T", + "type": "type" + } + ], + "description": "Outputs a copy of `images` but draws on top of the pixels zero or more bounding\nboxes specified by the locations in `boxes`. The coordinates of the each\nbounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. 
The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example, if an image is 100 x 200 pixels (height x width) and the bounding\nbox is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of\nthe bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).\n\nParts of the bounding box may fall outside the image.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, depth]`. A batch of images.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes.", + "name": "boxes", + "type": 1 + } + ], + "outputs": [ + { + "description": "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Draw bounding boxes on a batch of images." + } + }, + { + "name": "DrawBoundingBoxesV2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float16`.", + "name": "T", + "type": "type" + } + ], + "description": "Outputs a copy of `images` but draws on top of the pixels zero or more bounding\nboxes specified by the locations in `boxes`. The coordinates of the each\nbounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example, if an image is 100 x 200 pixels (height x width) and the bounding\nbox is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of\nthe bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates).\n\nParts of the bounding box may fall outside the image.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, depth]`. 
A batch of images.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding\nboxes.", + "name": "boxes", + "type": 1 + }, + { + "description": "2-D. A list of RGBA colors to cycle through for the boxes.", + "name": "colors", + "type": 1 + } + ], + "outputs": [ + { + "description": "4-D with the same shape as `images`. The batch of input images with\nbounding boxes drawn on the images.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Draw bounding boxes on a batch of images." + } + }, + { + "name": "DummyIterationCounter", + "schema": { + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + } + }, + { + "name": "DummyMemoryCache", + "schema": { + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + } + }, + { + "name": "DummySeedGenerator", + "schema": { + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + } + }, + { + "name": "DynamicPartition", + "schema": { + "attributes": [ + { + "description": "The number of partitions to output.", + "minimum": 1, + "name": "num_partitions", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`\nbecomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`\nare placed in `outputs[i]` in lexicographic order of `js`, and the first\ndimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.\nIn detail,\n\n```python\n outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]\n\n outputs[i] = pack([data[js, ...] 
for js if partitions[js] == i])\n```\n\n`data.shape` must start with `partitions.shape`.\n\nFor example:\n\n```python\n # Scalar partitions.\n partitions = 1\n num_partitions = 2\n data = [10, 20]\n outputs[0] = [] # Empty with shape [0, 2]\n outputs[1] = [[10, 20]]\n\n # Vector partitions.\n partitions = [0, 0, 1, 1, 0]\n num_partitions = 2\n data = [10, 20, 30, 40, 50]\n outputs[0] = [10, 20, 50]\n outputs[1] = [30, 40]\n```\n\nSee `dynamic_stitch` for an example on how to merge partitions back.\n\n
    \n\n
    ", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "Any shape. Indices in the range `[0, num_partitions)`.", + "name": "partitions", + "type": 3 + } + ], + "outputs": [ + { + "name": "outputs", + "numberAttr": "num_partitions", + "typeAttr": "T" + } + ], + "summary": "Partitions `data` into `num_partitions` tensors using indices from `partitions`." + } + }, + { + "name": "DynamicStitch", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "Builds a merged tensor such that\n\n```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n```\n\nFor example, if each `indices[m]` is scalar or vector, we have\n\n```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] = data[m][i, ...]\n```\n\nEach `data[i].shape` must start with the corresponding `indices[i].shape`,\nand the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\nmust have `data[i].shape = indices[i].shape + constant`. In terms of this\n`constant`, the output shape is\n\n merged.shape = [max(indices)] + constant\n\nValues are merged in order, so if an index appears in both `indices[m][i]` and\n`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the\nmerged result. 
If you do not need this guarantee, ParallelDynamicStitch might\nperform better on some devices.\n\nFor example:\n\n```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n```\n\nThis method can be used to merge partitions created by `dynamic_partition`\nas illustrated on the following example:\n\n```python\n # Apply function (increments x_i) on elements for which a certain condition\n # apply (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n```\n\n
    \n\n
    ", + "inputs": [ + { + "name": "indices", + "numberAttr": "N", + "type": 3 + }, + { + "name": "data", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "merged", + "typeAttr": "T" + } + ], + "summary": "Interleave the values from the `data` tensors into a single tensor." + } + }, + { + "name": "EagerPyFunc", + "schema": { + "attributes": [ + { + "name": "token", + "type": "string" + }, + { + "default": false, + "name": "is_async", + "type": "boolean" + }, + { + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Tout", + "type": "type[]" + } + ], + "description": "semantics of the input, output, and attributes are the same as those for\nPyFunc.", + "inputs": [ + { + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "Eagerly executes a python function to compute func(input)->output. The" + } + }, + { + "name": "EditDistance", + "schema": { + "attributes": [ + { + "default": true, + "description": "boolean (if true, edit distances are normalized by length of truth).\n\nThe output is:", + "name": "normalize", + "type": "boolean" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "The inputs are variable-length sequences provided by SparseTensors\n (hypothesis_indices, hypothesis_values, hypothesis_shape)\nand\n (truth_indices, truth_values, truth_shape).\n\nThe inputs are:", + "inputs": [ + { + "description": "The indices of the hypothesis list SparseTensor.\nThis is an N x R int64 matrix.", + "name": "hypothesis_indices", + "type": 9 + }, + { + "description": "The values of the hypothesis list SparseTensor.\nThis is an N-length vector.", + "name": "hypothesis_values", + "typeAttr": "T" + }, + { + "description": "The shape of the hypothesis list SparseTensor.\nThis is an R-length vector.", + "name": "hypothesis_shape", + "type": 9 + }, + { + "description": "The indices of the truth list 
SparseTensor.\nThis is an M x R int64 matrix.", + "name": "truth_indices", + "type": 9 + }, + { + "description": "The values of the truth list SparseTensor.\nThis is an M-length vector.", + "name": "truth_values", + "typeAttr": "T" + }, + { + "description": "truth indices, vector.", + "name": "truth_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "A dense float tensor with rank R - 1.\n\nFor the example input:\n\n // hypothesis represents a 2x1 matrix with variable-length values:\n // (0,0) = [\"a\"]\n // (1,0) = [\"b\"]\n hypothesis_indices = [[0, 0, 0],\n [1, 0, 0]]\n hypothesis_values = [\"a\", \"b\"]\n hypothesis_shape = [2, 1, 1]\n\n // truth represents a 2x2 matrix with variable-length values:\n // (0,0) = []\n // (0,1) = [\"a\"]\n // (1,0) = [\"b\", \"c\"]\n // (1,1) = [\"a\"]\n truth_indices = [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]]\n truth_values = [\"a\", \"b\", \"c\", \"a\"]\n truth_shape = [2, 2, 2]\n normalize = true\n\nThe output will be:\n\n // output is a 2x2 matrix with edit distances normalized by truth lengths.\n output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis", + "name": "output", + "type": 1 + } + ], + "summary": "Computes the (possibly normalized) Levenshtein Edit Distance." 
+ } + }, + { + "name": "Eig", + "schema": { + "attributes": [ + { + "default": true, + "description": "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed.", + "name": "compute_v", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tout", + "type": "type" + } + ], + "description": "Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in\n`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues\nare sorted in non-decreasing order.\n\n```python\n# a is a tensor.\n# e is a tensor of eigenvalues.\n# v is a tensor of eigenvectors.\ne, v = eig(a)\ne = eig(a, compute_v=False)\n```", + "inputs": [ + { + "description": "`Tensor` input of shape `[N, N]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Eigenvalues. Shape is `[N]`.", + "name": "e", + "typeAttr": "Tout" + }, + { + "description": "Eigenvectors. Shape is `[N, N]`.", + "name": "v", + "typeAttr": "Tout" + } + ], + "summary": "Computes the eigen decomposition of one or more square matrices." + } + }, + { + "name": "Einsum", + "schema": { + "attributes": [ + { + "description": "String describing the Einstein Summation operation; in the format of np.einsum.", + "name": "equation", + "type": "string" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "Implements generalized Tensor contraction and reduction. Each input Tensor must\nhave a corresponding input subscript appearing in the comma-separated left-hand\nside of the equation. The right-hand side of the equation consists of the\noutput subscript. 
The input subscripts and the output subscript should consist\nof zero or more named axis labels and at most one ellipsis (`...`).\n\nThe named axis labels may be any single character other than those having\nspecial meaning, namely `,.->`. The behavior of this Op is undefined if it\nreceives an ill-formatted equation; since the validation is done at\ngraph-building time, we omit format validation checks at runtime.\n\nNote: This Op is *not* intended to be called by the user; instead users should\ncall `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.\n\nOperations are applied to the input(s) according to the following rules:\n\n (a) Generalized Diagonals: For input dimensions corresponding to axis labels\n appearing more than once in the same input subscript, we take the\n generalized (`k`-dimensional) diagonal.\n For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the\n generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,\n `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.\n\n (b) Reduction: Axes corresponding to labels appearing only in one input\n subscript but not in the output subscript are summed over prior to Tensor\n contraction.\n For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are\n the reduction axis labels.\n\n (c) Batch Dimensions: Axes corresponding to labels appearing in each of the\n input subscripts and also in the output subscript make up the batch\n dimensions in Tensor contraction. 
Unnamed axis labels corresponding to\n ellipsis (`...`) also correspond to batch dimensions.\n For example, for the equation denoting batch matrix multiplication,\n `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.\n\n (d) Contraction: In case of binary einsum, axes corresponding to labels\n appearing in two different inputs (and not in the output) are contracted\n against each other.\n Considering the batch matrix multiplication equation again\n (`bij,bjk->bik`), the contracted axis label is `j`.\n\n (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis\n labels, the opposite operation of (a) is applied. For example, in the\n equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]`\n are all zeros, except for the (generalized) diagonal which is populated\n with values from the input.\n Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is\n provided to enable computing the symbolic gradient of `tf.einsum`.\n\nThe output subscripts must contain only labels appearing in at least one of the\ninput subscripts. Furthermore, all dimensions mapping to the same axis label\nmust be equal.\n\nAny of the input and output subscripts may contain at most a single ellipsis\n(`...`). These ellipsis are mapped against dimensions not corresponding to any\nnamed axis label. If two inputs contain ellipsis, then they are broadcasted\naccording to standard NumPy broadcasting\n[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).\n\nThe broadcasted dimensions are placed in the corresponding location of the\nellipsis in the output subscript. 
If the broadcasted dimensions are non-empty\nand the output subscripts do not contain ellipsis, then an InvalidArgument error\nis raised.\n\n@compatibility(numpy)\nSimilar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).\n\nComparison with `numpy.einsum`:\n\n * This Op only supports unary and binary forms of `numpy.einsum`.\n * This Op does not support implicit form. (i.e. equations without `->`).\n * This Op also supports repeated indices in the output subscript, which is not\n supported by `numpy.einsum`.\n@end_compatibility\n", + "inputs": [ + { + "description": "List of 1 or 2 Tensors.", + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Output Tensor with shape depending upon `equation`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Tensor contraction according to Einstein summation convention." + } + }, + { + "name": "Elu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "category": "Activation", + "description": "See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)\n](http://arxiv.org/abs/1511.07289)", + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ], + "summary": "Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise." 
+ } + }, + { + "name": "EluGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The backpropagated gradients to the corresponding Elu operation.", + "name": "gradients", + "typeAttr": "T" + }, + { + "description": "The outputs of the corresponding Elu operation.", + "name": "outputs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradients: `gradients * (outputs + 1)` if outputs < 0,\n`gradients` otherwise.", + "name": "backprops", + "typeAttr": "T" + } + ], + "summary": "Computes gradients for the exponential linear (Elu) operation." + } + }, + { + "name": "Empty", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": false, + "description": "If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initializethe tensor's content.", + "name": "init", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "1-D. Represents the shape of the output tensor.", + "name": "shape", + "type": 3 + } + ], + "outputs": [ + { + "description": "A `Tensor` of type `T`.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Creates a tensor with the given shape.\n\nThis operation creates a tensor of `shape` and `dtype`." 
+ } + }, + { + "name": "EmptyTensorList", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": "All list elements must be tensors of dtype element_dtype and shape compatible\nwith element_shape.\n\nhandle: an empty tensor list.\nelement_dtype: the type of elements in the list.\nelement_shape: a shape compatible with that of elements in the list.", + "inputs": [ + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "max_num_elements", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates and returns an empty tensor list." + } + }, + { + "name": "EncodeBase64", + "schema": { + "attributes": [ + { + "default": false, + "description": "Bool whether padding is applied at the ends.", + "name": "pad", + "type": "boolean" + } + ], + "description": "Refer to the following article for more information on base64 format:\nen.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the\nend so that the encoded has length multiple of 4. See Padding section of the\nlink above.\n\nWeb-safe means that the encoder uses - and _ instead of + and /.", + "inputs": [ + { + "description": "Strings to be encoded.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "Input strings encoded in base64.", + "name": "output", + "type": 7 + } + ], + "summary": "Encode strings into web-safe base64 format." + } + }, + { + "name": "EncodeJpeg", + "schema": { + "attributes": [ + { + "default": "", + "description": "Per pixel image format. 
Must be one of the following: ``, `grayscale`, `rgb`.", + "name": "format", + "type": "string" + }, + { + "default": 95, + "description": "Quality of the compression from 0 to 100 (higher is better and slower).", + "name": "quality", + "type": "int64" + }, + { + "default": false, + "description": "If True, create a JPEG that loads progressively (coarse to fine).", + "name": "progressive", + "type": "boolean" + }, + { + "default": false, + "description": "If True, spend CPU/RAM to reduce size with no quality change.", + "name": "optimize_size", + "type": "boolean" + }, + { + "default": true, + "description": "See http://en.wikipedia.org/wiki/Chroma_subsampling.", + "name": "chroma_downsampling", + "type": "boolean" + }, + { + "default": "in", + "description": "Unit used to specify `x_density` and `y_density`:\npixels per inch (`'in'`) or centimeter (`'cm'`). Must be one of the following: `in`, `cm`.", + "name": "density_unit", + "type": "string" + }, + { + "default": 300, + "description": "Horizontal pixels per density unit.", + "name": "x_density", + "type": "int64" + }, + { + "default": 300, + "description": "Vertical pixels per density unit.", + "name": "y_density", + "type": "int64" + }, + { + "default": "", + "description": "If not empty, embed this XMP metadata in the image header.", + "name": "xmp_metadata", + "type": "string" + } + ], + "description": "`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.\n\nThe attr `format` can be used to override the color format of the encoded\noutput. Values can be:\n\n* `''`: Use a default format based on the number of channels in the image.\n* `grayscale`: Output a grayscale JPEG image. The `channels` dimension\n of `image` must be 1.\n* `rgb`: Output an RGB JPEG image. 
The `channels` dimension\n of `image` must be 3.\n\nIf `format` is not specified or is the empty string, a default format is picked\nin function of the number of channels in `image`:\n\n* 1: Output a grayscale image.\n* 3: Output an RGB image.", + "inputs": [ + { + "description": "3-D with shape `[height, width, channels]`.", + "name": "image", + "type": 4 + } + ], + "outputs": [ + { + "description": "0-D. JPEG-encoded image.", + "name": "contents", + "type": 7 + } + ], + "summary": "JPEG-encode an image." + } + }, + { + "name": "EncodeJpegVariableQuality", + "schema": { + "description": "`image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.\n`quality` is an int32 jpeg compression quality value between 0 and 100.\n", + "inputs": [ + { + "description": "Images to adjust. At least 3-D.", + "name": "images", + "type": 4 + }, + { + "description": "An int quality to encode to.", + "name": "quality", + "type": 3 + } + ], + "outputs": [ + { + "description": "0-D. JPEG-encoded image.", + "name": "contents", + "type": 7 + } + ], + "summary": "JPEG encode input image with provided compression quality." + } + }, + { + "name": "EncodePng", + "schema": { + "attributes": [ + { + "default": -1, + "description": "Compression level.", + "name": "compression", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 4 + }, + "description": "Must be one of the following: `uint8`, `uint16`.", + "name": "T", + "type": "type" + } + ], + "description": "`image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`\nwhere `channels` is:\n\n* 1: for grayscale.\n* 2: for grayscale + alpha.\n* 3: for RGB.\n* 4: for RGBA.\n\nThe ZLIB compression level, `compression`, can be -1 for the PNG-encoder\ndefault or a value from 0 to 9. 
9 is the highest compression level, generating\nthe smallest output, but is slower.", + "inputs": [ + { + "description": "3-D with shape `[height, width, channels]`.", + "name": "image", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "0-D. PNG-encoded image.", + "name": "contents", + "type": 7 + } + ], + "summary": "PNG-encode an image." + } + }, + { + "name": "EncodeProto", + "schema": { + "attributes": [ + { + "description": "List of strings containing proto field names.", + "name": "field_names", + "type": "string[]" + }, + { + "description": "Name of the proto message type to decode.", + "name": "message_type", + "type": "string" + }, + { + "default": "local://", + "name": "descriptor_source", + "type": "string" + }, + { + "description": "The input types.", + "minimum": 1, + "name": "Tinput_types", + "type": "type[]" + } + ], + "description": "The types of the tensors in `values` must match the schema for the fields\nspecified in `field_names`. All the tensors in `values` must have a common\nshape prefix, *batch_shape*.\n\nThe `sizes` tensor specifies repeat counts for each field. The repeat count\n(last dimension) of a each tensor in `values` must be greater than or equal\nto corresponding repeat count in `sizes`.\n\nA `message_type` name must be provided to give context for the field names.\nThe actual message descriptor can be looked up either in the linked-in\ndescriptor pool or a filename provided by the caller using the\n`descriptor_source` attribute.\n\nFor the most part, the mapping between Proto field types and TensorFlow dtypes\nis straightforward. However, there are a few special cases:\n\n- A proto field that contains a submessage or group can only be converted\nto `DT_STRING` (the serialized submessage). This is to reduce the complexity\nof the API. The resulting string can be used as input to another instance of\nthe decode_proto op.\n\n- TensorFlow lacks support for unsigned integers. 
The ops represent uint64\ntypes as a `DT_INT64` with the same twos-complement bit pattern (the obvious\nway). Unsigned int32 values can be represented exactly by specifying type\n`DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in\nthe `output_types` attribute.\n\nThe `descriptor_source` attribute selects the source of protocol\ndescriptors to consult when looking up `message_type`. This may be:\n\n- An empty string or \"local://\", in which case protocol descriptors are\ncreated for C++ (not Python) proto definitions linked to the binary.\n\n- A file, in which case protocol descriptors are created from the file,\nwhich is expected to contain a `FileDescriptorSet` serialized as a string.\nNOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`\nand `--include_imports` options to the protocol compiler `protoc`.\n\n- A \"bytes://\", in which protocol descriptors are created from ``,\nwhich is expected to be a `FileDescriptorSet` serialized as a string.", + "inputs": [ + { + "description": "Tensor of int32 with shape `[batch_shape, len(field_names)]`.", + "name": "sizes", + "type": 3 + }, + { + "description": "List of tensors containing values for the corresponding field.", + "name": "values", + "typeListAttr": "Tinput_types" + } + ], + "outputs": [ + { + "description": "Tensor of serialized protos with shape `batch_shape`.", + "name": "bytes", + "type": 7 + } + ], + "summary": "The op serializes protobuf messages provided in the input tensors." + } + }, + { + "name": "EncodeWav", + "schema": { + "description": "This operation will generate a string suitable to be saved out to create a .wav\naudio file. It will be encoded in the 16-bit PCM format. It takes in float\nvalues in the range -1.0f to 1.0f, and any outside that value will be clamped to\nthat range.\n\n`audio` is a 2-D float Tensor of shape `[length, channels]`.\n`sample_rate` is a scalar Tensor holding the rate to use (e.g. 
44100).", + "inputs": [ + { + "description": "2-D with shape `[length, channels]`.", + "name": "audio", + "type": 1 + }, + { + "description": "Scalar containing the sample frequency.", + "name": "sample_rate", + "type": 3 + } + ], + "outputs": [ + { + "description": "0-D. WAV-encoded file contents.", + "name": "contents", + "type": 7 + } + ], + "summary": "Encode audio data using the WAV file format." + } + }, + { + "name": "EnqueueTPUEmbeddingIntegerBatch", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "default": -1, + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "name": "device_ordinal", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A list of 1D tensors, one for each embedding table, containing the\nindices into the tables.", + "name": "batch", + "numberAttr": "N", + "type": 3 + }, + { + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "name": "mode_override", + "type": 7 + } + ], + "summary": "An op that enqueues a list of input batch tensors to TPUEmbedding." 
+ } + }, + { + "name": "EnqueueTPUEmbeddingRaggedTensorBatch", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T1", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T2", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T3", + "type": "type" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "default": -1, + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "name": "device_ordinal", + "type": "int64" + }, + { + "default": [], + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "name": "combiners", + "type": "string[]" + }, + { + "description": "A list of integers specifying the identifier of the embedding table\n(offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the\ncorresponding input. The ith input is looked up using table_ids[i]. The size\nof the table_ids list must be equal to that of sample_indices,\nembedding_indices and aggregation_weights.", + "name": "table_ids", + "type": "int64[]" + }, + { + "default": [], + "name": "max_sequence_lengths", + "type": "int64[]" + } + ], + "description": "sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond\nto the ith feature. 
table_ids[i] indicates which embedding table to look up ith\nfeature.\n\nThe tensors at corresponding positions in two of the input lists,\nembedding_indices and aggregation_weights, must have the same shape, i.e. rank 1\nwith dim_size() equal to the total number of lookups into the table described by\nthe corresponding feature.", + "inputs": [ + { + "description": "A list of rank 1 Tensors specifying the break points for splitting\nembedding_indices and aggregation_weights into rows.\nIt corresponds to ids.row_splits in embedding_lookup(), when ids is a\nRaggedTensor.", + "name": "sample_splits", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "description": "A list of rank 1 Tensors, indices into the embedding tables.\nIt corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.", + "name": "embedding_indices", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "description": "A list of rank 1 Tensors containing per training example\naggregation weights. It corresponds to the values field of a RaggedTensor\nwith the same row_splits as ids in embedding_lookup(), when ids is a\nRaggedTensor.", + "name": "aggregation_weights", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "name": "mode_override", + "type": 7 + } + ], + "summary": "Eases the porting of code that uses tf.nn.embedding_lookup()." 
+ } + }, + { + "name": "EnqueueTPUEmbeddingSparseBatch", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T1", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T2", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T3", + "type": "type" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "default": -1, + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "name": "device_ordinal", + "type": "int64" + }, + { + "default": [], + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "name": "combiners", + "type": "string[]" + } + ], + "description": "This Op eases the porting of code that uses embedding_lookup_sparse(),\nalthough some Python preprocessing of the SparseTensor arguments to\nembedding_lookup_sparse() is required to produce the arguments to this Op,\nsince only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training\nstep.\n\nThe tensors at corresponding positions in the three input lists\nmust have the same shape, i.e. 
rank 1 with dim_size() equal to the total\nnumber of lookups into the table described by the corresponding table_id.", + "inputs": [ + { + "description": "A list of rank 1 Tensors specifying the training example and\nfeature to which the corresponding embedding_indices and aggregation_weights\nvalues belong. sample_indices[i] must equal b * nf + f, where nf is the\nnumber of features from the corresponding table, f is in [0, nf), and\nb is in [0, batch size).", + "name": "sample_indices", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "description": "A list of rank 1 Tensors, indices into the embedding tables.", + "name": "embedding_indices", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "description": "A list of rank 1 Tensors containing per sample -- i.e. per\n(training example, feature) -- aggregation weights.", + "name": "aggregation_weights", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "name": "mode_override", + "type": 7 + } + ], + "summary": "An op that enqueues TPUEmbedding input indices from a SparseTensor." 
+ } + }, + { + "name": "EnqueueTPUEmbeddingSparseTensorBatch", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T1", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T2", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T3", + "type": "type" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "default": -1, + "description": "The TPU device to use. Should be >= 0 and less than the number\nof TPU cores in the task on which the node is placed.", + "name": "device_ordinal", + "type": "int64" + }, + { + "default": [], + "description": "A list of string scalars, one for each embedding table that specify\nhow to normalize the embedding activations after weighted summation.\nSupported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have\nthe sum of the weights be 0 for 'mean' or the sum of the squared weights be\n0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for\nall tables.", + "name": "combiners", + "type": "string[]" + }, + { + "description": "A list of integers specifying the identifier of the embedding table\n(offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the\ncorresponding input. The ith input is looked up using table_ids[i]. The size\nof the table_ids list must be equal to that of sample_indices,\nembedding_indices and aggregation_weights.", + "name": "table_ids", + "type": "int64[]" + }, + { + "default": [], + "name": "max_sequence_lengths", + "type": "int64[]" + } + ], + "description": "sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond\nto the ith feature. 
table_ids[i] indicates which embedding table to look up ith\nfeature.\n\nThe tensors at corresponding positions in the three input lists (sample_indices,\nembedding_indices and aggregation_weights) must have the same shape, i.e. rank 1\nwith dim_size() equal to the total number of lookups into the table described by\nthe corresponding feature.", + "inputs": [ + { + "description": "A list of rank 1 Tensors specifying the training example to\nwhich the corresponding embedding_indices and aggregation_weights values\nbelong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().", + "name": "sample_indices", + "numberAttr": "N", + "typeAttr": "T1" + }, + { + "description": "A list of rank 1 Tensors, indices into the embedding tables.\nIt corresponds to sp_ids.values in embedding_lookup_sparse().", + "name": "embedding_indices", + "numberAttr": "N", + "typeAttr": "T2" + }, + { + "description": "A list of rank 1 Tensors containing per training example\naggregation weights. It corresponds to sp_weights.values in\nembedding_lookup_sparse().", + "name": "aggregation_weights", + "numberAttr": "N", + "typeAttr": "T3" + }, + { + "description": "A string input that overrides the mode specified in the\nTPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',\n'training', 'backward_pass_only'}. When set to 'unspecified', the mode set\nin TPUEmbeddingConfiguration is used, otherwise mode_override is used.", + "name": "mode_override", + "type": 7 + } + ], + "summary": "Eases the porting of code that uses tf.nn.embedding_lookup_sparse()." 
+ } + }, + { + "name": "EnsureShape", + "schema": { + "attributes": [ + { + "description": "The expected (possibly partially specified) shape of the input tensor.", + "name": "shape", + "type": "shape" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "Raises an error if the input tensor's shape does not match the specified shape.\nReturns the input tensor otherwise.", + "inputs": [ + { + "description": "A tensor, whose shape is to be validated.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A tensor with the same shape and contents as the input tensor or value.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Ensures that the tensor's shape matches the expected shape." + } + }, + { + "name": "Enter", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "The name of the child frame.", + "name": "frame_name", + "type": "string" + }, + { + "default": false, + "description": "If true, the output is constant within the child frame.", + "name": "is_constant", + "type": "boolean" + }, + { + "default": 10, + "description": "The number of iterations allowed to run in parallel.", + "name": "parallel_iterations", + "type": "int64" + } + ], + "description": "This op is used together with `Exit` to create loops in the graph.\nThe unique `frame_name` is used by the `Executor` to identify frames. If\n`is_constant` is true, `output` is a constant in the child frame; otherwise\nit may be changed in the child frame. At most `parallel_iterations` iterations\nare run in parallel in the child frame.", + "inputs": [ + { + "description": "The tensor to be made available to the child frame.", + "name": "data", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The same tensor as `data`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Creates or finds a child frame, and makes `data` available to the child frame." 
+ } + }, + { + "name": "Equal", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `uint16`, `uint32`, `uint64`, `complex64`, `quint8`, `qint8`, `qint32`, `string`, `bool`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": true, + "name": "incompatible_shape_error", + "type": "boolean" + } + ], + "description": "*NOTE*: `Equal` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n```python\nx = tf.constant([2, 4])\ny = tf.constant(2)\ntf.math.equal(x, y) ==> array([True, False])\n\nx = tf.constant([2, 4])\ny = tf.constant([2, 4])\ntf.math.equal(x, y) ==> array([True, True])\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of (x == y) element-wise." + } + }, + { + "name": "Erf", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the Gauss error function of `x` element-wise." + } + }, + { + "name": "Erfc", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the complementary error function of `x` element-wise." 
+ } + }, + { + "name": "Erfinv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + } + }, + { + "name": "EuclideanNorm", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the euclidean norm of elements across dimensions of a tensor." 
+ } + }, + { + "name": "Exit", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Exit makes its input `data` available to the parent frame.", + "inputs": [ + { + "description": "The tensor to be made available to the parent frame.", + "name": "data", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The same tensor as `data`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Exits the current frame to its parent frame." + } + }, + { + "name": "Exp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " This function computes the exponential of every element in the input tensor.\n i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.\n `e` denotes Euler's number and is approximately equal to 2.718281.\n Output is positive for any real input.\n\n ```python\n x = tf.constant(2.0)\n tf.math.exp(x) ==> 7.389056\n\n x = tf.constant([2.0, 8.0])\n tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)\n ```\n\n For complex numbers, the exponential value is calculated as follows:\n\n ```\n e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)\n ```\n\n Let's consider complex number 1+1j as an example.\n e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)\n\n ```python\n x = tf.constant(1 + 1j)\n tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes exponential of x element-wise. \\\\(y = e^x\\\\)." 
+ } + }, + { + "name": "ExpandDims", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tdim", + "type": "type" + } + ], + "description": "Given a tensor `input`, this operation inserts a dimension of 1 at the\ndimension index `dim` of `input`'s shape. The dimension index `dim` starts at\nzero; if you specify a negative number for `dim` it is counted backward from\nthe end.\n\nThis operation is useful if you want to add a batch dimension to a single\nelement. For example, if you have a single image of shape `[height, width,\nchannels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,\nwhich will make the shape `[1, height, width, channels]`.\n\nOther examples:\n\n```\n# 't' is a tensor of shape [2]\nshape(expand_dims(t, 0)) ==> [1, 2]\nshape(expand_dims(t, 1)) ==> [2, 1]\nshape(expand_dims(t, -1)) ==> [2, 1]\n\n# 't2' is a tensor of shape [2, 3, 5]\nshape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]\nshape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]\nshape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]\n```\n\nThis operation requires that:\n\n`-1-input.dims() <= dim <= input.dims()`\n\nThis operation is related to `squeeze()`, which removes dimensions of\nsize 1.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "description": "0-D (scalar). Specifies the dimension index at which to\nexpand the shape of `input`. Must be in the range\n`[-rank(input) - 1, rank(input)]`.", + "name": "dim", + "typeAttr": "Tdim" + } + ], + "outputs": [ + { + "description": "Contains the same data as `input`, but its shape has an additional\ndimension of size 1 added.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Inserts a dimension of 1 into a tensor's shape." 
+ } + }, + { + "name": "ExperimentalAssertNextDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "transformations", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalAutoShardDataset", + "schema": { + "attributes": [ + { + "default": 0, + "name": "auto_shard_policy", + "type": "int64" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Creates a dataset that shards the input dataset by num_workers, returning a\nsharded dataset for the index-th worker. This attempts to automatically shard\na dataset by examining the Dataset graph and inserting a shard op before the\ninputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).\n\nThis dataset will throw a NotFound error if we cannot shard the dataset\nautomatically.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of workers to distribute this dataset across.", + "name": "num_workers", + "type": 9 + }, + { + "description": "A scalar representing the index of the current worker out of num_workers.", + "name": "index", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that shards the input dataset." 
+ } + }, + { + "name": "ExperimentalBytesProducedStatsDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Records the bytes size of each element of `input_dataset` in a StatsAggregator." + } + }, + { + "name": "ExperimentalCSVDataset", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`, `string`.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + }, + { + "name": "compression_type", + "type": 7 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "header", + "type": 10 + }, + { + "name": "field_delim", + "type": 7 + }, + { + "name": "use_quote_delim", + "type": 10 + }, + { + "name": "na_value", + "type": 7 + }, + { + "name": "select_cols", + "type": 9 + }, + { + "name": "record_defaults", + "typeListAttr": "output_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalChooseFastestDataset", + "schema": { + "attributes": [ + { + "minimum": 2, + "name": "N", + "type": "int64" + }, + { + "name": "num_experiments", + "type": "int64" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_datasets", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalDatasetCardinality", + "schema": { + "description": "Returns the cardinality of `input_dataset`.", + "inputs": [ + 
{ + "description": "A variant tensor representing the dataset to return cardinality for.", + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "description": "The cardinality of `input_dataset`. Named constants are used to represent\ninfinite and unknown cardinality.", + "name": "cardinality", + "type": 9 + } + ], + "summary": "Returns the cardinality of `input_dataset`." + } + }, + { + "name": "ExperimentalDatasetToTFRecord", + "schema": { + "inputs": [ + { + "description": "A variant tensor representing the dataset to write.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar string tensor representing the filename to use.", + "name": "filename", + "type": 7 + }, + { + "description": "A scalar string tensor containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "name": "compression_type", + "type": 7 + } + ], + "summary": "Writes the given dataset to the given file using the TFRecord format." + } + }, + { + "name": "ExperimentalDenseToSparseBatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A handle to an input dataset. Must have a single component.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "name": "batch_size", + "type": 9 + }, + { + "description": "A vector representing the dense shape of each row in the produced\nSparseTensor. The shape may be partially specified, using `-1` to indicate\nthat a particular dimension should use the maximum size of all batch elements.", + "name": "row_shape", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that batches input elements into a SparseTensor." 
+ } + }, + { + "name": "ExperimentalDirectedInterleaveDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A dataset of scalar `DT_INT64` elements that determines which of the\n`N` data inputs should produce the next output element.", + "name": "selector_input_dataset", + "type": 21 + }, + { + "description": "`N` datasets with the same type that will be interleaved according to\nthe values of `selector_input_dataset`.", + "name": "data_input_datasets", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "A substitute for `InterleaveDataset` on a fixed list of `N` datasets." + } + }, + { + "name": "ExperimentalGroupByReducerDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64.", + "name": "key_func", + "type": "function" + }, + { + "description": "A function mapping a key of type DT_INT64, concatenated with\n`init_func_other_arguments` to the initial reducer state.", + "name": "init_func", + "type": "function" + }, + { + "description": "A function mapping the current reducer state and an element of `input_dataset`,\nconcatenated with `reduce_func_other_arguments` to a new reducer state.", + "name": "reduce_func", + "type": "function" + }, + { + "description": "A function mapping the final reducer state to an output element.", + "name": "finalize_func", + "type": "function" + }, + { + "minimum": 0, + "name": "Tkey_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Tinit_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Treduce_func_other_arguments", + "type": "type[]" + }, + { + 
"minimum": 0, + "name": "Tfinalize_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Creates a dataset that computes a group-by on `input_dataset`.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `key_func`.", + "name": "key_func_other_arguments", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `init_func`.", + "name": "init_func_other_arguments", + "typeListAttr": "Tinit_func_other_arguments" + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `reduce_func`.", + "name": "reduce_func_other_arguments", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `finalize_func`.", + "name": "finalize_func_other_arguments", + "typeListAttr": "Tfinalize_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that computes a group-by on `input_dataset`." 
+ } + }, + { + "name": "ExperimentalGroupByWindowDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64.", + "name": "key_func", + "type": "function" + }, + { + "name": "reduce_func", + "type": "function" + }, + { + "name": "window_size_func", + "type": "function" + }, + { + "minimum": 0, + "name": "Tkey_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Treduce_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Twindow_size_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "// TODO(mrry): Support non-int64 keys.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "key_func_other_arguments", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "name": "reduce_func_other_arguments", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "name": "window_size_func_other_arguments", + "typeListAttr": "Twindow_size_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that computes a windowed group-by on `input_dataset`." + } + }, + { + "name": "ExperimentalIgnoreErrorsDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that contains the elements of `input_dataset` ignoring errors." 
+ } + }, + { + "name": "ExperimentalIteratorGetDevice", + "schema": { + "inputs": [ + { + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "name": "device", + "type": 7 + } + ], + "summary": "Returns the name of the device on which `resource` has been placed." + } + }, + { + "name": "ExperimentalLMDBDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "filenames", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalLatencyStatsDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Records the latency of producing `input_dataset` elements in a StatsAggregator." 
+ } + }, + { + "name": "ExperimentalMapAndBatchDataset", + "schema": { + "attributes": [ + { + "description": "A function to apply to the outputs of `input_dataset`.", + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + } + ], + "description": "Creates a dataset that applies `f` to the outputs of `input_dataset` and then\nbatches `batch_size` of them.\n\nUnlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `batch_size * num_parallel_batches` copies of `f` in parallel.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A list of tensors, typically values that were captured when building a closure\nfor `f`.", + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "A scalar representing the number of elements to accumulate in a\nbatch. It determines the number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "name": "batch_size", + "type": 9 + }, + { + "description": "A scalar representing the maximum number of parallel invocations of the `map_fn`\nfunction. Applying the `map_fn` on consecutive input elements in parallel has\nthe potential to improve input pipeline throughput.", + "name": "num_parallel_calls", + "type": 9 + }, + { + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "name": "drop_remainder", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that fuses mapping with batching." 
+ } + }, + { + "name": "ExperimentalMapDataset", + "schema": { + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "use_inter_op_parallelism", + "type": "boolean" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "ExperimentalMatchingFilesDataset", + "schema": { + "inputs": [ + { + "name": "patterns", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalMaxIntraOpParallelismDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "Identifies the maximum intra-op parallelism to use.", + "name": "max_intra_op_parallelism", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that overrides the maximum intra-op parallelism." 
+ } + }, + { + "name": "ExperimentalNonSerializableDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalParallelInterleaveDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from a variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! This dataset is not deterministic!", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "type": 9 + }, + { + "name": "block_length", + "type": 9 + }, + { + "name": "sloppy", + "type": 10 + }, + { + "name": "buffer_output_elements", + "type": 9 + }, + { + "name": "prefetch_input_elements", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." 
+ } + }, + { + "name": "ExperimentalParseExampleDataset", + "schema": { + "attributes": [ + { + "description": "A list of string keys in the examples features.\nThe results for these keys will be returned as `SparseTensor` objects.", + "minimum": 0, + "name": "sparse_keys", + "type": "string[]" + }, + { + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples features associated with dense values.", + "minimum": 0, + "name": "dense_keys", + "type": "string[]" + }, + { + "description": "A list of `DTypes` of the same length as `sparse_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "A list of DTypes of the same length as `dense_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported.\n Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tdense", + "type": "type[]" + }, + { + "description": "List of tuples with the same length as `dense_keys`.\nThe shape of the data for each dense feature referenced by `dense_keys`.\nRequired for any input tensors identified by `dense_keys`. Must be\neither fully defined, or may contain an unknown first dimension.\nAn unknown first dimension means the feature is treated as having\na variable number of blocks, and the output shape along this dimension\nis considered unknown at graph build time. 
Padding is applied for\nminibatch elements smaller than the maximum number of blocks for the\ngiven feature along this dimension.", + "minimum": 0, + "name": "dense_shapes", + "type": "shape[]" + }, + { + "description": "The type list for the return values.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "description": "The list of shapes being produced.", + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": false, + "name": "sloppy", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_parallel_calls", + "type": 9 + }, + { + "description": "A dict mapping string keys to `Tensor`s.\nThe keys of the dict must match the dense_keys of the feature.", + "name": "dense_defaults", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features." + } + }, + { + "name": "ExperimentalPrivateThreadPoolDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "Identifies the number of threads to use for the private threadpool.", + "name": "num_threads", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`." + } + }, + { + "name": "ExperimentalRandomDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A scalar seed for the random number generator. 
If either seed or\nseed2 is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "name": "seed", + "type": 9 + }, + { + "description": "A second scalar seed to avoid seed collision.", + "name": "seed2", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a Dataset that returns pseudorandom numbers." + } + }, + { + "name": "ExperimentalRebatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "use_fallback", + "type": "boolean" + } + ], + "description": "Creates a dataset that changes the batch size of the dataset to current batch\nsize // num_replicas.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of replicas to distribute this batch across. As\na result of this transformation the current batch size would end up being\ndivided by this parameter.", + "name": "num_replicas", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that changes the batch size." 
+ } + }, + { + "name": "ExperimentalScanDataset", + "schema": { + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "minimum": 1, + "name": "Tstate", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "initial_state", + "typeListAttr": "Tstate" + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset successively reduces `f` over the elements of `input_dataset`." + } + }, + { + "name": "ExperimentalSetStatsAggregatorDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "stats_aggregator", + "type": 20 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "counter_prefix", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalSleepDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "sleep_microseconds", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ExperimentalSlidingWindowDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + 
"inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements in the\nsliding window.", + "name": "window_size", + "type": 9 + }, + { + "description": "A scalar representing the steps moving the sliding window\nforward in one iteration. It must be positive.", + "name": "window_shift", + "type": 9 + }, + { + "description": "A scalar representing the stride of the input elements of the sliding window.\nIt must be positive.", + "name": "window_stride", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that passes a sliding window over `input_dataset`." + } + }, + { + "name": "ExperimentalSqlDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "The database type. Currently, the only supported type is 'sqlite'.", + "name": "driver_name", + "type": 7 + }, + { + "description": "A connection string to connect to the database.", + "name": "data_source_name", + "type": 7 + }, + { + "description": "A SQL query to execute.", + "name": "query", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that executes a SQL query and emits rows of the result set." + } + }, + { + "name": "ExperimentalStatsAggregatorHandle", + "schema": { + "attributes": [ + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ], + "summary": "Creates a statistics manager resource." 
+ } + }, + { + "name": "ExperimentalStatsAggregatorSummary", + "schema": { + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ], + "summary": "Produces a summary of any statistics recorded by the given statistics manager." + } + }, + { + "name": "ExperimentalTakeWhileDataset", + "schema": { + "attributes": [ + { + "description": "A function returning a scalar boolean.", + "name": "predicate", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`.", + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that stops iteration when predicate` is false." + } + }, + { + "name": "ExperimentalThreadPoolDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A resource produced by the ThreadPoolHandle op.", + "name": "thread_pool", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`." 
+ } + }, + { + "name": "ExperimentalThreadPoolHandle", + "schema": { + "attributes": [ + { + "description": "The number of threads in the thread pool.", + "name": "num_threads", + "type": "int64" + }, + { + "default": 1, + "description": "The maximum degree of parallelism to use within operations that execute on this\nthreadpool.", + "name": "max_intra_op_parallelism", + "type": "int64" + }, + { + "description": "A human-readable name for the threads that may be visible in some\nvisualizations.\nthreadpool.", + "name": "display_name", + "type": "string" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "A resource that can be consumed by one or more ExperimentalThreadPoolDataset\nops.", + "name": "handle", + "type": 20 + } + ], + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`." + } + }, + { + "name": "ExperimentalUnbatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "A dataset that splits the elements of its input into multiple elements." + } + }, + { + "name": "ExperimentalUniqueDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that contains the unique elements of `input_dataset`." 
+ } + }, + { + "name": "Expint", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + } + }, + { + "name": "Expm1", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.\n `e` denotes Euler's number and is approximately equal to 2.718281.\n\n ```python\n x = tf.constant(2.0)\n tf.math.expm1(x) ==> 6.389056\n\n x = tf.constant([2.0, 8.0])\n tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)\n\n x = tf.constant(1 + 1j)\n tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes `exp(x) - 1` element-wise." + } + }, + { + "name": "ExtractGlimpse", + "schema": { + "attributes": [ + { + "default": true, + "description": "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images.", + "name": "centered", + "type": "boolean" + }, + { + "default": true, + "description": "indicates if the offset coordinates are normalized.", + "name": "normalized", + "type": "boolean" + }, + { + "default": true, + "description": "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution.", + "name": "uniform_noise", + "type": "boolean" + }, + { + "default": "uniform", + "description": "indicates if the noise should `uniform`, `gaussian`, or\n`zero`. 
The default is `uniform` which means the the noise type\nwill be decided by `uniform_noise`.", + "name": "noise", + "type": "string" + } + ], + "description": "Returns a set of windows called glimpses extracted at location\n`offsets` from the input tensor. If the windows only partially\noverlaps the inputs, the non overlapping areas will be filled with\nrandom noise.\n\nThe result is a 4-D tensor of shape `[batch_size, glimpse_height,\nglimpse_width, channels]`. The channels and batch dimensions are the\nsame as that of the input tensor. The height and width of the output\nwindows are specified in the `size` parameter.\n\nThe argument `normalized` and `centered` controls how the windows are built:\n\n* If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n* If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n* If the coordinates are not normalized they are interpreted as\n numbers of pixels.", + "inputs": [ + { + "description": "A 4-D float tensor of shape `[batch_size, height, width, channels]`.", + "name": "input", + "type": 1 + }, + { + "description": "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract. The glimpse height must be specified first, following\nby the glimpse width.", + "name": "size", + "type": 3 + }, + { + "description": "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe y, x locations of the center of each window.", + "name": "offsets", + "type": 1 + } + ], + "outputs": [ + { + "description": "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`.", + "name": "glimpse", + "type": 1 + } + ], + "summary": "Extracts a glimpse from the input tensor." 
+ } + }, + { + "name": "ExtractGlimpseV2", + "schema": { + "attributes": [ + { + "default": true, + "description": "indicates if the offset coordinates are centered relative to\nthe image, in which case the (0, 0) offset is relative to the center\nof the input images. If false, the (0,0) offset corresponds to the\nupper left corner of the input images.", + "name": "centered", + "type": "boolean" + }, + { + "default": true, + "description": "indicates if the offset coordinates are normalized.", + "name": "normalized", + "type": "boolean" + }, + { + "default": true, + "description": "indicates if the noise should be generated using a\nuniform distribution or a Gaussian distribution.", + "name": "uniform_noise", + "type": "boolean" + }, + { + "default": "uniform", + "description": "indicates if the noise should `uniform`, `gaussian`, or\n`zero`. The default is `uniform` which means the the noise type\nwill be decided by `uniform_noise`.", + "name": "noise", + "type": "string" + } + ], + "description": "Returns a set of windows called glimpses extracted at location\n`offsets` from the input tensor. If the windows only partially\noverlaps the inputs, the non overlapping areas will be filled with\nrandom noise.\n\nThe result is a 4-D tensor of shape `[batch_size, glimpse_height,\nglimpse_width, channels]`. The channels and batch dimensions are the\nsame as that of the input tensor. The height and width of the output\nwindows are specified in the `size` parameter.\n\nThe argument `normalized` and `centered` controls how the windows are built:\n\n* If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n* If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. 
The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n* If the coordinates are not normalized they are interpreted as\n numbers of pixels.", + "inputs": [ + { + "description": "A 4-D float tensor of shape `[batch_size, height, width, channels]`.", + "name": "input", + "type": 1 + }, + { + "description": "A 1-D tensor of 2 elements containing the size of the glimpses\nto extract. The glimpse height must be specified first, following\nby the glimpse width.", + "name": "size", + "type": 3 + }, + { + "description": "A 2-D integer tensor of shape `[batch_size, 2]` containing\nthe y, x locations of the center of each window.", + "name": "offsets", + "type": 1 + } + ], + "outputs": [ + { + "description": "A tensor representing the glimpses `[batch_size,\nglimpse_height, glimpse_width, channels]`.", + "name": "glimpse", + "type": 1 + } + ], + "summary": "Extracts a glimpse from the input tensor." + } + }, + { + "name": "ExtractImagePatches", + "schema": { + "attributes": [ + { + "description": "The size of the sliding window for each dimension of `images`.", + "minimum": 4, + "name": "ksizes", + "type": "int64[]" + }, + { + "description": "How far the centers of two consecutive patches are in\nthe images. Must be: `[1, stride_rows, stride_cols, 1]`.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be: `[1, rate_rows, rate_cols, 1]`. This is the\ninput stride, specifying how far two consecutive patch samples are in the\ninput. Equivalent to extracting patches with\n`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by\nsubsampling them spatially by a factor of `rates`. This is equivalent to\n`rate` in dilated (a.k.a. 
Atrous) convolutions.", + "minimum": 4, + "name": "rates", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`, `bool`.", + "name": "T", + "type": "type" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "inputs": [ + { + "description": "4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.", + "name": "images", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *\nksize_cols * depth]` containing image patches with size\n`ksize_rows x ksize_cols x depth` vectorized in the \"depth\" dimension. Note\n`out_rows` and `out_cols` are the dimensions of the output patches.", + "name": "patches", + "typeAttr": "T" + } + ], + "summary": "Extract `patches` from `images` and put them in the \"depth\" output dimension." + } + }, + { + "name": "ExtractJpegShape", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "(Optional) The output type of the operation (int32 or int64).\nDefaults to int32. Must be one of the following: `int32`, `int64`.", + "name": "output_type", + "type": "type" + } + ], + "description": "This op only parses the image header, so it is much faster than DecodeJpeg.", + "inputs": [ + { + "description": "0-D. The JPEG-encoded image.", + "name": "contents", + "type": 7 + } + ], + "outputs": [ + { + "description": "1-D. The image shape with format [height, width, channels].", + "name": "image_shape", + "typeAttr": "output_type" + } + ], + "summary": "Extract the shape information of a JPEG-encoded image." 
+ } + }, + { + "name": "ExtractVolumePatches", + "schema": { + "attributes": [ + { + "description": "The size of the sliding window for each dimension of `input`.", + "minimum": 5, + "name": "ksizes", + "type": "int64[]" + }, + { + "description": "1-D of length 5. How far the centers of two consecutive patches are in\n`input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "The type of padding algorithm to use.\n\nWe specify the size-related attributes as:\n\n```python\n ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]\n strides = [1, stride_planes, strides_rows, strides_cols, 1]\n``` Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "inputs": [ + { + "description": "5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "5-D Tensor with shape `[batch, out_planes, out_rows, out_cols,\nksize_planes * ksize_rows * ksize_cols * depth]` containing patches\nwith size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized\nin the \"depth\" dimension. Note `out_planes`, `out_rows` and `out_cols`\nare the dimensions of the output patches.", + "name": "patches", + "typeAttr": "T" + } + ], + "summary": "Extract `patches` from `input` and put them in the \"depth\" output dimension. 3D extension of `extract_image_patches`." 
+ } + }, + { + "name": "FFT", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the 1-dimensional discrete Fourier transform over the inner-most\ndimension of `input`.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "description": "A complex tensor of the same shape as `input`. The inner-most\n dimension of `input` is replaced with its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "Fast Fourier transform." + } + }, + { + "name": "FFT2D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the 2-dimensional discrete Fourier transform over the inner-most\n2 dimensions of `input`.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "description": "A complex tensor of the same shape as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fft2\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "2D fast Fourier transform." 
+ } + }, + { + "name": "FFT3D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the 3-dimensional discrete Fourier transform over the inner-most 3\ndimensions of `input`.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "description": "A complex tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.fftn with 3 dimensions.\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "3D fast Fourier transform." + } + }, + { + "name": "FIFOQueue", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": [], + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. 
If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to the queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "A queue that produces elements in first-in first-out order." + } + }, + { + "name": "FIFOQueueV2", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": [], + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. 
If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to the queue.", + "name": "handle", + "type": 20 + } + ], + "summary": "A queue that produces elements in first-in first-out order." + } + }, + { + "name": "Fact", + "schema": { + "outputs": [ + { + "name": "fact", + "type": 7 + } + ], + "summary": "Output a fact about factorials." + } + }, + { + "name": "FakeParam", + "schema": { + "attributes": [ + { + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "description": " The purported shape of the output. This is only used for shape inference;\n the output will not necessarily have this shape. Can be a partial shape.", + "name": "shape", + "type": "shape" + } + ], + "outputs": [ + { + "description": " \\\"Fake\\\" output value. This should not be consumed by another op.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": " This op is used as a placeholder in If branch functions. It doesn't provide a\n valid output when run, so must either be removed (e.g. replaced with a\n function input) or guaranteed not to be used (e.g. if mirroring an\n intermediate output needed for the gradient computation of the other branch)." 
+ } + }, + { + "name": "FakeQuantWithMinMaxArgs", + "schema": { + "attributes": [ + { + "default": -6.0, + "name": "min", + "type": "float32" + }, + { + "default": 6.0, + "name": "max", + "type": "float32" + }, + { + "default": 8, + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "name": "narrow_range", + "type": "boolean" + } + ], + "description": "Attributes `[min; max]` define the clamping range for the `inputs` data.\n`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\nwhen `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\nthen de-quantized and output as floats in `[min; max]` interval.\n`num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\n\nBefore quantization, `min` and `max` values are adjusted with the following\nlogic.\nIt is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,\nthe behavior can be unexpected:\nIf `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.\nIf `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.\nIf `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,\n`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.\n\nQuantization is called fake since the output is still in floating point.", + "inputs": [ + { + "name": "inputs", + "type": 1 + } + ], + "outputs": [ + { + "name": "outputs", + "type": 1 + } + ], + "summary": "Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type." 
+ } + }, + { + "name": "FakeQuantWithMinMaxArgsGradient", + "schema": { + "attributes": [ + { + "default": -6.0, + "name": "min", + "type": "float32" + }, + { + "default": 6.0, + "name": "max", + "type": "float32" + }, + { + "default": 8, + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "name": "narrow_range", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.", + "name": "gradients", + "type": 1 + }, + { + "description": "Values passed as inputs to the FakeQuantWithMinMaxArgs operation.", + "name": "inputs", + "type": 1 + } + ], + "outputs": [ + { + "description": "Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:\n`gradients * (inputs >= min && inputs <= max)`.", + "name": "backprops", + "type": 1 + } + ], + "summary": "Compute gradients for a FakeQuantWithMinMaxArgs operation." + } + }, + { + "name": "FakeQuantWithMinMaxVars", + "schema": { + "attributes": [ + { + "default": 8, + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "name": "narrow_range", + "type": "boolean" + } + ], + "description": "and `max` to 'outputs' tensor of same shape as `inputs`.\n\n`[min; max]` define the clamping range for the `inputs` data.\n`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\nwhen `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\nthen de-quantized and output as floats in `[min; max]` interval.\n`num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\n\nBefore quantization, `min` and `max` values are adjusted with the following\nlogic.\nIt is suggested to have `min <= 0 <= max`. 
If `0` is not in the range of values,\nthe behavior can be unexpected:\nIf `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.\nIf `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.\nIf `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,\n`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.\n\nThis operation has a gradient and thus allows for training `min` and `max`\nvalues.", + "inputs": [ + { + "name": "inputs", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "name": "outputs", + "type": 1 + } + ], + "summary": "Fake-quantize the 'inputs' tensor of type float via global float scalars `min`" + } + }, + { + "name": "FakeQuantWithMinMaxVarsGradient", + "schema": { + "attributes": [ + { + "default": 8, + "description": "The bitwidth of the quantization; between 2 and 8, inclusive.", + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "description": "Whether to quantize into 2^num_bits - 1 distinct values.", + "name": "narrow_range", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Backpropagated gradients above the FakeQuantWithMinMaxVars operation.", + "name": "gradients", + "type": 1 + }, + { + "description": "Values passed as inputs to the FakeQuantWithMinMaxVars operation.\nmin, max: Quantization interval, scalar floats.", + "name": "inputs", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "description": "Backpropagated gradients w.r.t. inputs:\n`gradients * (inputs >= min && inputs <= max)`.", + "name": "backprops_wrt_input", + "type": 1 + }, + { + "description": "Backpropagated gradients w.r.t. min parameter:\n`sum(gradients * (inputs < min))`.", + "name": "backprop_wrt_min", + "type": 1 + }, + { + "description": "Backpropagated gradients w.r.t. 
max parameter:\n`sum(gradients * (inputs > max))`.", + "name": "backprop_wrt_max", + "type": 1 + } + ], + "summary": "Compute gradients for a FakeQuantWithMinMaxVars operation." + } + }, + { + "name": "FakeQuantWithMinMaxVarsPerChannel", + "schema": { + "attributes": [ + { + "default": 8, + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "name": "narrow_range", + "type": "boolean" + } + ], + "description": "`[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`\nto 'outputs' tensor of same shape as `inputs`.\n\n`[min; max]` define the clamping range for the `inputs` data.\n`inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`\nwhen `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and\nthen de-quantized and output as floats in `[min; max]` interval.\n`num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.\n\nBefore quantization, `min` and `max` values are adjusted with the following\nlogic.\nIt is suggested to have `min <= 0 <= max`. 
If `0` is not in the range of values,\nthe behavior can be unexpected:\nIf `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.\nIf `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.\nIf `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,\n`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.\n\nThis operation has a gradient and thus allows for training `min` and `max`\nvalues.", + "inputs": [ + { + "name": "inputs", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "name": "outputs", + "type": 1 + } + ], + "summary": "Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`," + } + }, + { + "name": "FakeQuantWithMinMaxVarsPerChannelGradient", + "schema": { + "attributes": [ + { + "default": 8, + "description": "The bitwidth of the quantization; between 2 and 16, inclusive.", + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "description": "Whether to quantize into 2^num_bits - 1 distinct values.", + "name": "narrow_range", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Backpropagated gradients above the FakeQuantWithMinMaxVars operation,\nshape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.", + "name": "gradients", + "type": 1 + }, + { + "description": "Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape\n same as `gradients`.\nmin, max: Quantization interval, floats of shape `[d]`.", + "name": "inputs", + "type": 1 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "description": "Backpropagated gradients w.r.t. inputs, shape same as\n`inputs`:\n `gradients * (inputs >= min && inputs <= max)`.", + "name": "backprops_wrt_input", + "type": 1 + }, + { + "description": "Backpropagated gradients w.r.t. 
min parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs < min))`.", + "name": "backprop_wrt_min", + "type": 1 + }, + { + "description": "Backpropagated gradients w.r.t. max parameter, shape `[d]`:\n`sum_per_d(gradients * (inputs > max))`.", + "name": "backprop_wrt_max", + "type": 1 + } + ], + "summary": "Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation." + } + }, + { + "name": "FakeQueue", + "schema": { + "inputs": [ + { + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "Deprecated. Do not use." + } + }, + { + "name": "Fill", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "index_type", + "type": "type" + } + ], + "description": "This operation creates a tensor of shape `dims` and fills it with `value`.\n\nFor example:\n\n```\n# Output tensor has shape [2, 3].\nfill([2, 3], 9) ==> [[9, 9, 9]\n [9, 9, 9]]\n```\n\n`tf.fill` differs from `tf.constant` in a few ways:\n\n* `tf.fill` only supports scalar contents, whereas `tf.constant` supports\n Tensor values.\n* `tf.fill` creates an Op in the computation graph that constructs the actual\n Tensor value at runtime. This is in contrast to `tf.constant` which embeds\n the entire Tensor into the graph with a `Const` node.\n* Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes\n based on other runtime Tensors, unlike `tf.constant`.", + "inputs": [ + { + "description": "1-D. Represents the shape of the output tensor.", + "name": "dims", + "typeAttr": "index_type" + }, + { + "description": "0-D (scalar). 
Value to fill the returned tensor.\n\n@compatibility(numpy)\nEquivalent to np.full\n@end_compatibility", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Creates a tensor filled with a scalar value." + } + }, + { + "name": "FilterByLastComponentDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ], + "summary": "Creates a dataset containing elements of first component of `input_dataset` having true in the last component." + } + }, + { + "name": "FilterDataset", + "schema": { + "attributes": [ + { + "description": "A function returning a scalar boolean.", + "name": "predicate", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`.", + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset containing elements of `input_dataset` matching `predicate`." 
+ } + }, + { + "name": "Fingerprint", + "schema": { + "attributes": [ + { + "description": "This can be a POD-type or string type.", + "name": "T", + "type": "type" + } + ], + "description": "Generates fingerprint values of `data`.\n\nFingerprint op considers the first dimension of `data` as the batch dimension,\nand `output[i]` contains the fingerprint value generated from contents in\n`data[i, ...]` for all `i`.\n\nFingerprint op writes fingerprint values as byte arrays. For example, the\ndefault method `farmhash64` generates a 64-bit fingerprint value at a time.\nThis 8-byte value is written out as an `uint8` array of size 8, in little-endian\norder.\n\nFor example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),\nand that the fingerprint method is `farmhash64`. In this case, the output shape\nis (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of\neach fingerprint value in bytes. `output[0, :]` is generated from 12 integers in\n`data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers\nin `data[1, :, :]`.\n\nNote that this op fingerprints the raw underlying buffer, and it does not\nfingerprint Tensor's metadata such as data type and/or shape. For example, the\nfingerprint values are invariant under reshapes and bitcasts as long as the\nbatch dimension remain the same:\n\n```\nFingerprint(data) == Fingerprint(Reshape(data, ...))\nFingerprint(data) == Fingerprint(Bitcast(data, ...))\n```\n\nFor string data, one should expect `Fingerprint(data) !=\nFingerprint(ReduceJoin(data))` in general.", + "inputs": [ + { + "description": "Must have rank 1 or higher.", + "name": "data", + "typeAttr": "T" + }, + { + "description": "Fingerprint method used by this op. Currently available method is\n`farmhash::fingerprint64`.", + "name": "method", + "type": 7 + } + ], + "outputs": [ + { + "description": "A two-dimensional `Tensor` of type `tf.uint8`. 
The first dimension equals to\n`data`'s first dimension, and the second dimension size depends on the\nfingerprint algorithm.", + "name": "fingerprint", + "type": 4 + } + ], + "summary": "Generates fingerprint values." + } + }, + { + "name": "FixedLengthRecordDataset", + "schema": { + "inputs": [ + { + "description": "A scalar or a vector containing the name(s) of the file(s) to be\nread.", + "name": "filenames", + "type": 7 + }, + { + "description": "A scalar representing the number of bytes to skip at the\nbeginning of a file.", + "name": "header_bytes", + "type": 9 + }, + { + "description": "A scalar representing the number of bytes in each record.", + "name": "record_bytes", + "type": 9 + }, + { + "description": "A scalar representing the number of bytes to skip at the end\nof a file.", + "name": "footer_bytes", + "type": 9 + }, + { + "description": "A scalar representing the number of bytes to buffer. Must be > 0.", + "name": "buffer_size", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that emits the records from one or more binary files." 
+ } + }, + { + "name": "FixedLengthRecordDatasetV2", + "schema": { + "inputs": [ + { + "name": "filenames", + "type": 7 + }, + { + "name": "header_bytes", + "type": 9 + }, + { + "name": "record_bytes", + "type": 9 + }, + { + "name": "footer_bytes", + "type": 9 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "compression_type", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "FixedLengthRecordReader", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Number of bytes in the header, defaults to 0.", + "name": "header_bytes", + "type": "int64" + }, + { + "description": "Number of bytes in the record.", + "name": "record_bytes", + "type": "int64" + }, + { + "default": 0, + "description": "Number of bytes in the footer, defaults to 0.", + "name": "footer_bytes", + "type": "int64" + }, + { + "default": 0, + "description": "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes.", + "name": "hop_bytes", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to reference the Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "summary": "A Reader that outputs fixed-length records from a file." 
+ } + }, + { + "name": "FixedLengthRecordReaderV2", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Number of bytes in the header, defaults to 0.", + "name": "header_bytes", + "type": "int64" + }, + { + "description": "Number of bytes in the record.", + "name": "record_bytes", + "type": "int64" + }, + { + "default": 0, + "description": "Number of bytes in the footer, defaults to 0.", + "name": "footer_bytes", + "type": "int64" + }, + { + "default": 0, + "description": "Number of bytes to hop before each read. Default of 0 means using\nrecord_bytes.", + "name": "hop_bytes", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + }, + { + "default": "", + "description": "The type of encoding for the file. Currently ZLIB and GZIP\nare supported. Defaults to none.", + "name": "encoding", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to reference the Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "summary": "A Reader that outputs fixed-length records from a file." + } + }, + { + "name": "FixedUnigramCandidateSampler", + "schema": { + "attributes": [ + { + "description": "Number of true labels per context.", + "minimum": 1, + "name": "num_true", + "type": "int64" + }, + { + "description": "Number of candidates to randomly sample.", + "minimum": 1, + "name": "num_sampled", + "type": "int64" + }, + { + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. 
This requires some approximation to\nestimate the post-rejection sampling probabilities.", + "name": "unique", + "type": "boolean" + }, + { + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1, + "name": "range_max", + "type": "int64" + }, + { + "default": "", + "description": "Each valid line in this file (which should have a CSV-like format)\ncorresponds to a valid word ID. IDs are in sequential order, starting from\nnum_reserved_ids. The last entry in each line is expected to be a value\ncorresponding to the count or relative probability. Exactly one of vocab_file\nand unigrams needs to be passed to this op.", + "name": "vocab_file", + "type": "string" + }, + { + "default": 1.0, + "description": "The distortion is used to skew the unigram probability distribution.\nEach weight is first raised to the distortion's power before adding to the\ninternal unigram distribution. As a result, distortion = 1.0 gives regular\nunigram sampling (as defined by the vocab file), and distortion = 0.0 gives\na uniform distribution.", + "name": "distortion", + "type": "float32" + }, + { + "default": 0, + "description": "Optionally some reserved IDs can be added in the range [0,\n..., num_reserved_ids) by the users. One use case is that a special unknown\nword token is used as ID 0. These IDs will have a sampling probability of 0.", + "name": "num_reserved_ids", + "type": "int64" + }, + { + "default": 1, + "description": "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. This parameter\n(together with 'shard') indicates the number of partitions that are being\nused in the overall computation.", + "minimum": 1, + "name": "num_shards", + "type": "int64" + }, + { + "default": 0, + "description": "A sampler can be used to sample from a subset of the original range\nin order to speed up the whole computation through parallelism. 
This parameter\n(together with 'num_shards') indicates the particular partition number of a\nsampler op, when partitioning is being used.", + "minimum": 0, + "name": "shard", + "type": "int64" + }, + { + "default": [], + "description": "A list of unigram counts or probabilities, one per ID in sequential\norder. Exactly one of vocab_file and unigrams should be passed to this op.", + "name": "unigrams", + "type": "float32[]" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "A unigram sampler could use a fixed unigram distribution read from a\nfile or passed in as an in-memory array instead of building up the distribution\nfrom data on the fly. There is also an option to skew the distribution by\napplying a distortion power to the weights.\n\nThe vocabulary file should be in CSV-like format, with the last field\nbeing the weight associated with the word.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. 
The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "inputs": [ + { + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "name": "true_classes", + "type": 9 + } + ], + "outputs": [ + { + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "name": "sampled_candidates", + "type": 9 + }, + { + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "name": "true_expected_count", + "type": 1 + }, + { + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "name": "sampled_expected_count", + "type": 1 + } + ], + "summary": "Generates labels for candidate sampling with a learned unigram distribution." 
+ } + }, + { + "name": "FlatMapDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Unlike MapDataset, the `f` in FlatMapDataset is expected to return a\nDataset variant, and FlatMapDataset will flatten successive results\ninto a single Dataset.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "Floor", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns element-wise largest integer not greater than x." + } + }, + { + "name": "FloorDiv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `FloorDiv` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x // y element-wise." + } + }, + { + "name": "FloorMod", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`, `uint64`, `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "true, this follows Python semantics in that the result here is consistent\nwith a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.\n\n*NOTE*: `FloorMod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns element-wise remainder of division. When `x < 0` xor `y < 0` is" + } + }, + { + "name": "FlushSummaryWriter", + "schema": { + "inputs": [ + { + "name": "writer", + "type": 20 + } + ] + } + }, + { + "name": "For", + "schema": { + "attributes": [ + { + "description": "A list of dtypes.", + "minimum": 0, + "name": "T", + "type": "type[]" + }, + { + "description": " A function that takes a list of tensors (int32, T) and returns another\n list of tensors (T).", + "name": "body", + "type": "function" + } + ], + "inputs": [ + { + "description": "The lower bound. An int32", + "name": "start", + "type": 3 + }, + { + "description": "The upper bound. An int32", + "name": "limit", + "type": 3 + }, + { + "description": "The increment. 
An int32", + "name": "delta", + "type": 3 + }, + { + "description": "A list of input tensors whose types are T.", + "name": "input", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "description": "A list of output tensors whose types are T.", + "name": "output", + "typeListAttr": "T" + } + ], + "summary": " ```python\n output = input;\n for i in range(start, limit, delta)\n output = body(i, output);\n ```" + } + }, + { + "name": "FractionalAvgPool", + "schema": { + "attributes": [ + { + "description": "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don't allow pooling on batch and channels\ndimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively.", + "minimum": 4, + "name": "pooling_ratio", + "type": "float32[]" + }, + { + "default": false, + "description": "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random.", + "name": "pseudo_random", + "type": "boolean" + }, + { + "default": false, + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling.", + "name": "overlapping", + "type": "boolean" + }, + { + "default": false, + "description": "When set to True, a fixed pooling region will be used when\niterating over a FractionalAvgPool node in the computation graph. 
Mainly used\nin unit test to make FractionalAvgPool deterministic.", + "name": "deterministic", + "type": "boolean" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "Fractional average pooling is similar to Fractional max pooling in the pooling\nregion generation step. The only difference is that after pooling regions are\ngenerated, a mean operation is performed instead of a max operation in each\npooling region.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "output tensor after fractional avg pooling.", + "name": "output", + "typeAttr": "T" + }, + { + "description": "row pooling sequence, needed to calculate gradient.", + "name": "row_pooling_sequence", + "type": 9 + }, + { + "description": "column pooling sequence, needed to calculate gradient.", + "name": "col_pooling_sequence", + "type": 9 + } + ], + "summary": "Performs fractional average pooling on the input." + } + }, + { + "name": "FractionalAvgPoolGrad", + "schema": { + "attributes": [ + { + "default": false, + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. 
For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [41/3, 26/3] for fractional avg pooling.", + "name": "overlapping", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "Unlike FractionalMaxPoolGrad, we don't need to find arg_max for\nFractionalAvgPoolGrad, we just need to evenly back-propagate each element of\nout_backprop to those indices that form the same pooling cell. Therefore, we\njust need to know the shape of original input tensor, instead of the whole\ntensor.", + "inputs": [ + { + "description": "Original input tensor shape for `fractional_avg_pool`", + "name": "orig_input_tensor_shape", + "type": 9 + }, + { + "description": "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t. the output of `fractional_avg_pool`.", + "name": "out_backprop", + "typeAttr": "T" + }, + { + "description": "row pooling sequence, form pooling region with\ncol_pooling_sequence.", + "name": "row_pooling_sequence", + "type": 9 + }, + { + "description": "column pooling sequence, form pooling region with\nrow_pooling sequence.", + "name": "col_pooling_sequence", + "type": 9 + } + ], + "outputs": [ + { + "description": "4-D. Gradients w.r.t. the input of `fractional_avg_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradient of the FractionalAvgPool function." + } + }, + { + "name": "FractionalMaxPool", + "schema": { + "attributes": [ + { + "description": "Pooling ratio for each dimension of `value`, currently only\nsupports row and col dimension and should be >= 1.0. For example, a valid\npooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements\nmust be 1.0 because we don't allow pooling on batch and channels\ndimensions. 
1.44 and 1.73 are pooling ratio on height and width dimensions\nrespectively.", + "minimum": 4, + "name": "pooling_ratio", + "type": "float32[]" + }, + { + "default": false, + "description": "When set to True, generates the pooling sequence in a\npseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin\nGraham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for\ndifference between pseudorandom and random.", + "name": "pseudo_random", + "type": "boolean" + }, + { + "default": false, + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling.", + "name": "overlapping", + "type": "boolean" + }, + { + "default": false, + "description": "When set to True, a fixed pooling region will be used when\niterating over a FractionalMaxPool node in the computation graph. Mainly used\nin unit test to make FractionalMaxPool deterministic.", + "name": "deterministic", + "type": "boolean" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "Fractional max pooling is slightly different than regular max pooling. In\nregular max pooling, you downsize an input set by taking the maximum value of\nsmaller N x N subsections of the set (often 2x2), and try to reduce the set by\na factor of N, where N is an integer. 
Fractional max pooling, as you might\nexpect from the word \"fractional\", means that the overall reduction ratio N\ndoes not have to be an integer.\n\nThe sizes of the pooling regions are generated randomly but are fairly uniform.\nFor example, let's look at the height dimension, and the constraints on the\nlist of rows that will be pool boundaries.\n\nFirst we define the following:\n\n1. input_row_length : the number of rows from the input set\n2. output_row_length : which will be smaller than the input\n3. alpha = input_row_length / output_row_length : our reduction ratio\n4. K = floor(alpha)\n5. row_pooling_sequence : this is the result list of pool boundary rows\n\nThen, row_pooling_sequence should satisfy:\n\n1. a[0] = 0 : the first value of the sequence is 0\n2. a[end] = input_row_length : the last value of the sequence is the size\n3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size\n4. length(row_pooling_sequence) = output_row_length+1\n\nFor more details on fractional max pooling, see this paper:\n[Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "output tensor after fractional max pooling.", + "name": "output", + "typeAttr": "T" + }, + { + "description": "row pooling sequence, needed to calculate gradient.", + "name": "row_pooling_sequence", + "type": 9 + }, + { + "description": "column pooling sequence, needed to calculate gradient.", + "name": "col_pooling_sequence", + "type": 9 + } + ], + "summary": "Performs fractional max pooling on the input." + } + }, + { + "name": "FractionalMaxPoolGrad", + "schema": { + "attributes": [ + { + "default": false, + "description": "When set to True, it means when pooling, the values at the boundary\nof adjacent pooling cells are used by both cells. 
For example:\n\n`index 0 1 2 3 4`\n\n`value 20 5 16 3 7`\n\nIf the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.\nThe result would be [20, 16] for fractional max pooling.", + "name": "overlapping", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "Original input for `fractional_max_pool`", + "name": "orig_input", + "typeAttr": "T" + }, + { + "description": "Original output for `fractional_max_pool`", + "name": "orig_output", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, height, width, channels]`. Gradients\nw.r.t. the output of `fractional_max_pool`.", + "name": "out_backprop", + "typeAttr": "T" + }, + { + "description": "row pooling sequence, form pooling region with\ncol_pooling_sequence.", + "name": "row_pooling_sequence", + "type": 9 + }, + { + "description": "column pooling sequence, form pooling region with\nrow_pooling sequence.", + "name": "col_pooling_sequence", + "type": 9 + } + ], + "outputs": [ + { + "description": "4-D. Gradients w.r.t. the input of `fractional_max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradient of the FractionalMaxPool function." 
+ } + }, + { + "name": "FresnelCos", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + } + }, + { + "name": "FresnelSin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + } + }, + { + "name": "FusedBatchNorm", + "schema": { + "attributes": [ + { + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float32`.", + "name": "T", + "type": "type" + }, + { + "default": 9.999999747378752e-05, + "description": "A small float number added to the variance of x.", + "name": "epsilon", + "type": "float32" + }, + { + "default": 1.0, + "name": "exponential_avg_factor", + "type": "float32" + }, + { + "default": "NHWC", + "description": "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\". 
Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": true, + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "name": "is_training", + "type": "boolean" + } + ], + "category": "Normalization", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "inputs": [ + { + "description": "A 4D Tensor for input data.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "name": "scale", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for offset, to shift to the normalized x.", + "name": "offset", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training.", + "name": "mean", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training.", + "name": "variance", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A 4D Tensor for output data.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean.", + "name": "batch_mean", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance.", + "name": "batch_variance", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation.", + "name": "reserve_space_1", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation.", + "name": "reserve_space_2", + "typeAttr": "T" + } + ], + "summary": "Batch normalization." 
+ } + }, + { + "name": "FusedBatchNormGrad", + "schema": { + "attributes": [ + { + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float32`.", + "name": "T", + "type": "type" + }, + { + "default": 9.999999747378752e-05, + "description": "A small float number added to the variance of x.", + "name": "epsilon", + "type": "float32" + }, + { + "default": "NHWC", + "description": "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": true, + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "name": "is_training", + "type": "boolean" + } + ], + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "inputs": [ + { + "description": "A 4D Tensor for the gradient with respect to y.", + "name": "y_backprop", + "typeAttr": "T" + }, + { + "description": "A 4D Tensor for input data.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "name": "scale", + "typeAttr": "T" + }, + { + "description": "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation.", + "name": "reserve_space_1", + "typeAttr": "T" + }, + { + "description": "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. 
When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation.", + "name": "reserve_space_2", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A 4D Tensor for the gradient with respect to x.", + "name": "x_backprop", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the gradient with respect to scale.", + "name": "scale_backprop", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the gradient with respect to offset.", + "name": "offset_backprop", + "typeAttr": "T" + }, + { + "description": "Unused placeholder to match the mean input in FusedBatchNorm.", + "name": "reserve_space_3", + "typeAttr": "T" + }, + { + "description": "Unused placeholder to match the variance input\nin FusedBatchNorm.", + "name": "reserve_space_4", + "typeAttr": "T" + } + ], + "summary": "Gradient for batch normalization." + } + }, + { + "name": "FusedBatchNormGradV2", + "schema": { + "attributes": [ + { + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `float32`.", + "name": "U", + "type": "type" + }, + { + "default": 9.999999747378752e-05, + "description": "A small float number added to the variance of x.", + "name": "epsilon", + "type": "float32" + }, + { + "default": "NHWC", + "description": "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\". 
Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": true, + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "name": "is_training", + "type": "boolean" + } + ], + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "inputs": [ + { + "description": "A 4D Tensor for the gradient with respect to y.", + "name": "y_backprop", + "typeAttr": "T" + }, + { + "description": "A 4D Tensor for input data.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "name": "scale", + "type": 1 + }, + { + "description": "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation.", + "name": "reserve_space_1", + "typeAttr": "U" + }, + { + "description": "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. 
When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation.", + "name": "reserve_space_2", + "typeAttr": "U" + } + ], + "outputs": [ + { + "description": "A 4D Tensor for the gradient with respect to x.", + "name": "x_backprop", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the gradient with respect to scale.", + "name": "scale_backprop", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the gradient with respect to offset.", + "name": "offset_backprop", + "typeAttr": "U" + }, + { + "description": "Unused placeholder to match the mean input in FusedBatchNorm.", + "name": "reserve_space_3", + "typeAttr": "U" + }, + { + "description": "Unused placeholder to match the variance input\nin FusedBatchNorm.", + "name": "reserve_space_4", + "typeAttr": "U" + } + ], + "summary": "Gradient for batch normalization." + } + }, + { + "name": "FusedBatchNormGradV3", + "schema": { + "attributes": [ + { + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `float32`.", + "name": "U", + "type": "type" + }, + { + "default": 9.999999747378752e-05, + "description": "A small float number added to the variance of x.", + "name": "epsilon", + "type": "float32" + }, + { + "default": "NHWC", + "description": "The data format for y_backprop, x, x_backprop.\nEither \"NHWC\" (default) or \"NCHW\". 
Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": true, + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "name": "is_training", + "type": "boolean" + } + ], + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "inputs": [ + { + "description": "A 4D Tensor for the gradient with respect to y.", + "name": "y_backprop", + "typeAttr": "T" + }, + { + "description": "A 4D Tensor for input data.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "name": "scale", + "type": 1 + }, + { + "description": "When is_training is True, a 1D Tensor for the computed batch\nmean to be reused in gradient computation. When is_training is\nFalse, a 1D Tensor for the population mean to be reused in both\n1st and 2nd order gradient computation.", + "name": "reserve_space_1", + "typeAttr": "U" + }, + { + "description": "When is_training is True, a 1D Tensor for the computed batch\nvariance (inverted variance in the cuDNN case) to be reused in\ngradient computation. When is_training is False, a 1D Tensor\nfor the population variance to be reused in both 1st and 2nd\norder gradient computation.", + "name": "reserve_space_2", + "typeAttr": "U" + }, + { + "description": "When is_training is True, a 1D Tensor for some intermediate results to be reused\nin gradient computation. 
When is_training is False, a dummy empty Tensor will be\ncreated.", + "name": "reserve_space_3", + "typeAttr": "U" + } + ], + "outputs": [ + { + "description": "A 4D Tensor for the gradient with respect to x.", + "name": "x_backprop", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the gradient with respect to scale.", + "name": "scale_backprop", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the gradient with respect to offset.", + "name": "offset_backprop", + "typeAttr": "U" + }, + { + "description": "Unused placeholder to match the mean input in FusedBatchNorm.", + "name": "reserve_space_4", + "typeAttr": "U" + }, + { + "description": "Unused placeholder to match the variance input\nin FusedBatchNorm.", + "name": "reserve_space_5", + "typeAttr": "U" + } + ], + "summary": "Gradient for batch normalization." + } + }, + { + "name": "FusedBatchNormV2", + "schema": { + "attributes": [ + { + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `float32`.", + "name": "U", + "type": "type" + }, + { + "default": 9.999999747378752e-05, + "description": "A small float number added to the variance of x.", + "name": "epsilon", + "type": "float32" + }, + { + "default": 1.0, + "name": "exponential_avg_factor", + "type": "float32" + }, + { + "default": "NHWC", + "description": "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\". 
Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": true, + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "name": "is_training", + "type": "boolean" + } + ], + "category": "Normalization", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "inputs": [ + { + "description": "A 4D Tensor for input data.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "name": "scale", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for offset, to shift to the normalized x.", + "name": "offset", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training.", + "name": "mean", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for population variance. Used for inference only;\nmust be empty for training.", + "name": "variance", + "typeAttr": "U" + } + ], + "outputs": [ + { + "description": "A 4D Tensor for output data.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean.", + "name": "batch_mean", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance.", + "name": "batch_variance", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation.", + "name": "reserve_space_1", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation.", + "name": "reserve_space_2", + "typeAttr": "U" + } + ], + "summary": "Batch normalization." 
+ } + }, + { + "name": "FusedBatchNormV3", + "schema": { + "attributes": [ + { + "description": "The data type for the elements of input and output Tensors. Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "description": "The data type for the scale, offset, mean, and variance. Must be one of the following: `float32`.", + "name": "U", + "type": "type" + }, + { + "default": 9.999999747378752e-05, + "description": "A small float number added to the variance of x.", + "name": "epsilon", + "type": "float32" + }, + { + "default": 1.0, + "name": "exponential_avg_factor", + "type": "float32" + }, + { + "default": "NHWC", + "description": "The data format for x and y. Either \"NHWC\" (default) or \"NCHW\". Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": true, + "description": "A bool value to indicate the operation is for training (default)\nor inference.", + "name": "is_training", + "type": "boolean" + } + ], + "category": "Normalization", + "description": "Note that the size of 4D Tensors are defined by either \"NHWC\" or \"NCHW\".\nThe size of 1D Tensors matches the dimension C of the 4D Tensors.", + "inputs": [ + { + "description": "A 4D Tensor for input data.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for scaling factor, to scale the normalized x.", + "name": "scale", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for offset, to shift to the normalized x.", + "name": "offset", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for population mean. Used for inference only;\nmust be empty for training.", + "name": "mean", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for population variance. 
Used for inference only;\nmust be empty for training.", + "name": "variance", + "typeAttr": "U" + } + ], + "outputs": [ + { + "description": "A 4D Tensor for output data.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "A 1D Tensor for the computed batch mean, to be used by TensorFlow\nto compute the running mean.", + "name": "batch_mean", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the computed batch variance, to be used by\nTensorFlow to compute the running variance.", + "name": "batch_variance", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the computed batch mean, to be reused\nin the gradient computation.", + "name": "reserve_space_1", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for the computed batch variance (inverted variance\nin the cuDNN case), to be reused in the gradient computation.", + "name": "reserve_space_2", + "typeAttr": "U" + }, + { + "description": "A 1D Tensor for some intermediate results, to be reused in the gradient\ncomputation for better efficiency.", + "name": "reserve_space_3", + "typeAttr": "U" + } + ], + "summary": "Batch normalization." + } + }, + { + "name": "FusedPadConv2D", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `REFLECT`, `SYMMETRIC`.", + "name": "mode", + "type": "string" + }, + { + "description": "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. 
Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "description": "Similar to FusedResizeAndPadConv2d, this op allows for an optimized\nimplementation where the spatial padding transformation stage is fused with the\nim2col lookup, but in this case without the bilinear filtering required for\nresizing. Fusing the padding prevents the need to write out the intermediate\nresults as whole tensors, reducing memory pressure, and we can get some latency\ngains by merging the transformation calculations.\nThe data_format attribute for Conv2D isn't supported by this op, and 'NHWC'\norder is used instead.\nInternally this op uses a single per-graph scratch buffer, which means that it\nwill block if multiple versions are being run in parallel. This is because this\noperator is primarily an optimization to minimize memory usage.", + "inputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`.", + "name": "paddings", + "type": 3 + }, + { + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.", + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Performs a padding as a preprocess during a convolution." + } + }, + { + "name": "FusedResizeAndPadConv2D", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. 
Defaults to false.", + "name": "resize_align_corners", + "type": "boolean" + }, + { + "description": "Must be one of the following: `REFLECT`, `SYMMETRIC`.", + "name": "mode", + "type": "string" + }, + { + "description": "1-D of length 4. The stride of the sliding window for each dimension\nof `input`. Must be in the same order as the dimension specified with format.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "description": "It's often possible to do spatial transformations more efficiently as part of\nthe packing stage of a convolution, so this op allows for an optimized\nimplementation where these stages are fused together. This prevents the need to\nwrite out the intermediate results as whole tensors, reducing memory pressure,\nand we can get some latency gains by merging the transformation calculations.\nThe data_format attribute for Conv2D isn't supported by this op, and defaults to\n'NHWC' order.\nInternally this op uses a single per-graph scratch buffer, which means that it\nwill block if multiple versions are being run in parallel. This is because this\noperator is primarily an optimization to minimize memory usage.", + "inputs": [ + { + "description": "4-D with shape `[batch, in_height, in_width, in_channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "name": "size", + "type": 3 + }, + { + "description": "A two-column matrix specifying the padding sizes. 
The number of\nrows must be the same as the rank of `input`.", + "name": "paddings", + "type": 3 + }, + { + "description": "4-D with shape\n`[filter_height, filter_width, in_channels, out_channels]`.", + "name": "filter", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Performs a resize and padding as a preprocess during a convolution." + } + }, + { + "name": "GRUBlockCell", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "Args\n x: Input to the GRU cell.\n h_prev: State input from the previous GRU cell.\n w_ru: Weight matrix for the reset and update gate.\n w_c: Weight matrix for the cell connection gate.\n b_ru: Bias vector for the reset and update gate.\n b_c: Bias vector for the cell connection gate.\n\nReturns\n r: Output of the reset gate.\n u: Output of the update gate.\n c: Output of the cell connection gate.\n h: Current state of the GRU cell.\n\nNote on notation of the variables:\n\nConcatenation of a and b is represented by a_b\nElement-wise dot product of a and b is represented by ab\nElement-wise dot product is represented by \\circ\nMatrix multiplication is represented by *\n\nBiases are initialized with :\n`b_ru` - constant_initializer(1.0)\n`b_c` - constant_initializer(0.0)\n\nThis kernel op implements the following mathematical equations:\n\n```\nx_h_prev = [x, h_prev]\n\n[r_bar u_bar] = x_h_prev * w_ru + b_ru\n\nr = sigmoid(r_bar)\nu = sigmoid(u_bar)\n\nh_prevr = h_prev \\circ r\n\nx_h_prevr = [x h_prevr]\n\nc_bar = x_h_prevr * w_c + b_c\nc = tanh(c_bar)\n\nh = (1-u) \\circ c + u \\circ h_prev\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "h_prev", + "typeAttr": "T" + }, + { + "name": "w_ru", + "typeAttr": "T" + }, + { + "name": "w_c", + "typeAttr": "T" + }, + { + "name": "b_ru", + "typeAttr": "T" + }, + { + "name": "b_c", + "typeAttr": "T" + } + ], + 
"outputs": [ + { + "name": "r", + "typeAttr": "T" + }, + { + "name": "u", + "typeAttr": "T" + }, + { + "name": "c", + "typeAttr": "T" + }, + { + "name": "h", + "typeAttr": "T" + } + ], + "summary": "Computes the GRU cell forward propagation for 1 time step." + } + }, + { + "name": "GRUBlockCellGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "Args\n x: Input to the GRU cell.\n h_prev: State input from the previous GRU cell.\n w_ru: Weight matrix for the reset and update gate.\n w_c: Weight matrix for the cell connection gate.\n b_ru: Bias vector for the reset and update gate.\n b_c: Bias vector for the cell connection gate.\n r: Output of the reset gate.\n u: Output of the update gate.\n c: Output of the cell connection gate.\n d_h: Gradients of the h_new wrt to objective function.\n\nReturns\n d_x: Gradients of the x wrt to objective function.\n d_h_prev: Gradients of the h wrt to objective function.\n d_c_bar Gradients of the c_bar wrt to objective function.\n d_r_bar_u_bar Gradients of the r_bar & u_bar wrt to objective function.\n\nThis kernel op implements the following mathematical equations:\n\nNote on notation of the variables:\n\nConcatenation of a and b is represented by a_b\nElement-wise dot product of a and b is represented by ab\nElement-wise dot product is represented by \\circ\nMatrix multiplication is represented by *\n\nAdditional notes for clarity:\n\n`w_ru` can be segmented into 4 different matrices.\n```\nw_ru = [w_r_x w_u_x\n w_r_h_prev w_u_h_prev]\n```\nSimilarly, `w_c` can be segmented into 2 different matrices.\n```\nw_c = [w_c_x w_c_h_prevr]\n```\nSame goes for biases.\n```\nb_ru = [b_ru_x b_ru_h]\nb_c = [b_c_x b_c_h]\n```\nAnother note on notation:\n```\nd_x = d_x_component_1 + d_x_component_2\n\nwhere d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_r_x^T\nand d_x_component_2 = d_c_bar * w_c_x^T\n\nd_h_prev = 
d_h_prev_component_1 + d_h_prevr \\circ r + d_h \\circ u\nwhere d_h_prev_componenet_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_r_h_prev^T\n```\n\nMathematics behind the Gradients below:\n```\nd_c_bar = d_h \\circ (1-u) \\circ (1-c \\circ c)\nd_u_bar = d_h \\circ (h-c) \\circ u \\circ (1-u)\n\nd_r_bar_u_bar = [d_r_bar d_u_bar]\n\n[d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T\n\n[d_x_component_2 d_h_prevr] = d_c_bar * w_c^T\n\nd_x = d_x_component_1 + d_x_component_2\n\nd_h_prev = d_h_prev_component_1 + d_h_prevr \\circ r + u\n```\nBelow calculation is performed in the python wrapper for the Gradients\n(not in the gradient kernel.)\n```\nd_w_ru = x_h_prevr^T * d_c_bar\n\nd_w_c = x_h_prev^T * d_r_bar_u_bar\n\nd_b_ru = sum of d_r_bar_u_bar along axis = 0\n\nd_b_c = sum of d_c_bar along axis = 0\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "h_prev", + "typeAttr": "T" + }, + { + "name": "w_ru", + "typeAttr": "T" + }, + { + "name": "w_c", + "typeAttr": "T" + }, + { + "name": "b_ru", + "typeAttr": "T" + }, + { + "name": "b_c", + "typeAttr": "T" + }, + { + "name": "r", + "typeAttr": "T" + }, + { + "name": "u", + "typeAttr": "T" + }, + { + "name": "c", + "typeAttr": "T" + }, + { + "name": "d_h", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "d_x", + "typeAttr": "T" + }, + { + "name": "d_h_prev", + "typeAttr": "T" + }, + { + "name": "d_c_bar", + "typeAttr": "T" + }, + { + "name": "d_r_bar_u_bar", + "typeAttr": "T" + } + ], + "summary": "Computes the GRU cell back-propagation for 1 time step." 
+ } + }, + { + "name": "Gather", + "schema": { + "attributes": [ + { + "default": true, + "name": "validate_indices", + "type": "boolean" + }, + { + "name": "Tparams", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `indices.shape + params.shape[1:]` where:\n\n```python\n # Scalar indices\n output[:, ..., :] = params[indices, :, ... :]\n\n # Vector indices\n output[i, :, ..., :] = params[indices[i], :, ... :]\n\n # Higher rank indices\n output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]\n```\n\nIf `indices` is a permutation and `len(indices) == params.shape[0]` then\nthis operation will permute `params` accordingly.\n\n`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in\n`indices` are always validated to be within range. If assigned to GPU,\nout-of-bound indices result in safe but unspecified behavior, which may include\nraising an error.\n\n
    \n\n
    ", + "inputs": [ + { + "name": "params", + "typeAttr": "Tparams" + }, + { + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tparams" + } + ], + "summary": "Gather slices from `params` according to `indices`." + } + }, + { + "name": "GatherNd", + "schema": { + "attributes": [ + { + "name": "Tparams", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "`indices` is a K-dimensional integer tensor, best thought of as a\n(K-1)-dimensional tensor of indices into `params`, where each element defines a\nslice of `params`:\n\n output[\\\\(i_0, ..., i_{K-2}\\\\)] = params[indices[\\\\(i_0, ..., i_{K-2}\\\\)]]\n\nWhereas in `tf.gather` `indices` defines slices into the `axis`\ndimension of `params`, in `tf.gather_nd`, `indices` defines slices into the\nfirst `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\nThe last dimension of `indices` can be at most the rank of\n`params`:\n\n indices.shape[-1] <= params.rank\n\nThe last dimension of `indices` corresponds to elements\n(if `indices.shape[-1] == params.rank`) or slices\n(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`\nof `params`. 
The output tensor has shape\n\n indices.shape[:-1] + params.shape[indices.shape[-1]:]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, a 0 is stored in the\ncorresponding output value.\n\nSome examples below.\n\nSimple indexing into a matrix:\n\n```python\n indices = [[0, 0], [1, 1]]\n params = [['a', 'b'], ['c', 'd']]\n output = ['a', 'd']\n```\n\nSlice indexing into a matrix:\n\n```python\n indices = [[1], [0]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['c', 'd'], ['a', 'b']]\n```\n\nIndexing into a 3-tensor:\n\n```python\n indices = [[1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['a1', 'b1'], ['c1', 'd1']]]\n\n\n indices = [[0, 1], [1, 0]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['c0', 'd0'], ['a1', 'b1']]\n\n\n indices = [[0, 0, 1], [1, 0, 1]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = ['b0', 'b1']\n```\n\nBatched indexing into a matrix:\n\n```python\n indices = [[[0, 0]], [[0, 1]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [['a'], ['b']]\n```\n\nBatched slice indexing into a matrix:\n\n```python\n indices = [[[1]], [[0]]]\n params = [['a', 'b'], ['c', 'd']]\n output = [[['c', 'd']], [['a', 'b']]]\n```\n\nBatched indexing into a 3-tensor:\n\n```python\n indices = [[[1]], [[0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[[['a1', 'b1'], ['c1', 'd1']]],\n [[['a0', 'b0'], ['c0', 'd0']]]]\n\n indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [[['c0', 'd0'], ['a1', 'b1']],\n [['a0', 'b0'], ['c1', 'd1']]]\n\n\n indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]\n params = [[['a0', 'b0'], ['c0', 'd0']],\n [['a1', 'b1'], ['c1', 'd1']]]\n output = [['b0', 'b1'], ['d0', 'c1']]\n```\n\nSee also `tf.gather` and 
`tf.batch_gather`.", + "inputs": [ + { + "description": "The tensor from which to gather values.", + "name": "params", + "typeAttr": "Tparams" + }, + { + "description": "Index tensor.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Values from `params` gathered from indices given by `indices`, with\nshape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.", + "name": "output", + "typeAttr": "Tparams" + } + ], + "summary": "Gather slices from `params` into a Tensor with shape specified by `indices`." + } + }, + { + "name": "GatherV2", + "schema": { + "attributes": [ + { + "default": 0, + "name": "batch_dims", + "type": "int64" + }, + { + "name": "Tparams", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Taxis", + "type": "type" + } + ], + "description": "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `params.shape[:axis] +\nindices.shape[batch_dims:] + params.shape[axis + 1:]` where:\n\n```python\n # Scalar indices (output is rank(params) - 1).\n output[a_0, ..., a_n, b_0, ..., b_n] =\n params[a_0, ..., a_n, indices, b_0, ..., b_n]\n\n # Vector indices (output is rank(params)).\n output[a_0, ..., a_n, i, b_0, ..., b_n] =\n params[a_0, ..., a_n, indices[i], b_0, ..., b_n]\n\n # Higher rank indices (output is rank(params) + rank(indices) - 1).\n output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =\n params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]\n```\n\n
    \n\n
    \n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, a 0 is stored in the\ncorresponding output value.\n\nSee also `tf.batch_gather` and `tf.gather_nd`.", + "inputs": [ + { + "description": "The tensor from which to gather values. Must be at least rank\n`axis + 1`.", + "name": "params", + "typeAttr": "Tparams" + }, + { + "description": "Index tensor. Must be in range `[0, params.shape[axis])`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "The axis in `params` to gather `indices` from. Defaults to the first\ndimension. Supports negative indexes.", + "name": "axis", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "description": "Values from `params` gathered from indices given by `indices`, with\nshape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.", + "name": "output", + "typeAttr": "Tparams" + } + ], + "summary": "Gather slices from `params` axis `axis` according to `indices`." + } + }, + { + "name": "GenerateBoundingBoxProposals", + "schema": { + "attributes": [ + { + "default": 300, + "description": "An integer. 
Maximum number of rois in the output.", + "name": "post_nms_topn", + "type": "int64" + } + ], + "description": " The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors,\n applies non-maximal suppression on overlapping boxes with higher than\n `nms_threshold` intersection-over-union (iou) value, discarding boxes where shorter\n side is less than `min_size`.\n Inputs:\n `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at given position\n `bbox_deltas`: is a tensor of shape [Batch, Height, Width, 4 x Num Anchors] boxes encoded to each anchor\n `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.\n Outputs:\n `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded by 0 if less than post_nms_topn candidates found.\n `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch,post_nms_topn], padded with 0 if needed, sorted by scores.", + "inputs": [ + { + "description": "A 4-D float tensor of shape `[num_images, height, width, num_achors]` containing scores of the boxes for given anchors, can be unsorted.", + "name": "scores", + "type": 1 + }, + { + "description": "A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]`. encoding boxes with respec to each anchor.\nCoordinates are given in the form [dy, dx, dh, dw].", + "name": "bbox_deltas", + "type": 1 + }, + { + "description": "A 2-D float tensor of shape `[num_images, 5]` containing image information Height, Width, Scale.", + "name": "image_info", + "type": 1 + }, + { + "description": "A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. 
Boxes are formatted in the form [y1, x1, y2, x2].", + "name": "anchors", + "type": 1 + }, + { + "description": "A scalar float tensor for non-maximal-suppression threshold.", + "name": "nms_threshold", + "type": 1 + }, + { + "description": "A scalar int tensor for the number of top scoring boxes to be used as input.", + "name": "pre_nms_topn", + "type": 3 + }, + { + "description": "A scalar float tensor. Any box that has a smaller size than min_size will be discarded.", + "name": "min_size", + "type": 1 + } + ], + "outputs": [ + { + "description": "A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected\nregion of interest boxes. Sorted in descending order in scores.", + "name": "rois", + "type": 1 + }, + { + "description": "A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the\nregion of interest box in `rois` tensor at the same index.", + "name": "roi_probabilities", + "type": 1 + } + ], + "summary": "This op produces Region of Interests from given bounding boxes(bbox_deltas) encoded wrt anchors according to eq.2 in arXiv:1506.01497" + } + }, + { + "name": "GenerateVocabRemapping", + "schema": { + "attributes": [ + { + "description": "How many entries into the new vocab file to start reading.", + "minimum": 0, + "name": "new_vocab_offset", + "type": "int64" + }, + { + "description": "Number of entries in the new vocab file to remap.", + "minimum": 0, + "name": "num_new_vocab", + "type": "int64" + }, + { + "default": -1, + "description": "Number of entries in the old vocab file to consider. 
If -1,\nuse the entire old vocabulary.", + "minimum": -1, + "name": "old_vocab_size", + "type": "int64" + } + ], + "description": "length `num_new_vocab`, where `remapping[i]` contains the row number in the old\nvocabulary that corresponds to row `i` in the new vocabulary (starting at line\n`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`\nin the new vocabulary is not in the old vocabulary. The old vocabulary is\nconstrained to the first `old_vocab_size` entries if `old_vocab_size` is not the\ndefault value of -1.\n\n`num_vocab_offset` enables\nuse in the partitioned variable case, and should generally be set through\nexamining partitioning info. The format of the files should be a text file,\nwith each line containing a single entity within the vocabulary.\n\nFor example, with `new_vocab_file` a text file containing each of the following\nelements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3],\n`num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be\n`[0, -1, 2]`.\n\nThe op also returns a count of how many entries in the new vocabulary\nwere present in the old vocabulary, which is used to calculate the number of\nvalues to initialize in a weight matrix remapping\n\nThis functionality can be used to remap both row vocabularies (typically,\nfeatures) and column vocabularies (typically, classes) from TensorFlow\ncheckpoints. Note that the partitioning logic relies on contiguous vocabularies\ncorresponding to div-partitioned variables. 
Moreover, the underlying remapping\nuses an IndexTable (as opposed to an inexact CuckooTable), so client code should\nuse the corresponding index_table_from_file() as the FeatureColumn framework\ndoes (as opposed to tf.feature_to_id(), which uses a CuckooTable).", + "inputs": [ + { + "description": "Path to the new vocab file.", + "name": "new_vocab_file", + "type": 7 + }, + { + "description": "Path to the old vocab file.", + "name": "old_vocab_file", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor of length num_new_vocab where the element at index i\nis equal to the old ID that maps to the new ID i. This element is -1 for any\nnew ID that is not found in the old vocabulary.", + "name": "remapping", + "type": 9 + }, + { + "description": "Number of new vocab entries found in old vocab.", + "name": "num_present", + "type": 3 + } + ], + "summary": "Given a path to new and old vocabulary files, returns a remapping Tensor of" + } + }, + { + "name": "GeneratorDataset", + "schema": { + "attributes": [ + { + "name": "init_func", + "type": "function" + }, + { + "name": "next_func", + "type": "function" + }, + { + "name": "finalize_func", + "type": "function" + }, + { + "minimum": 0, + "name": "Tinit_func_args", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Tnext_func_args", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Tfinalize_func_args", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "init_func_other_args", + "typeListAttr": "Tinit_func_args" + }, + { + "name": "next_func_other_args", + "typeListAttr": "Tnext_func_args" + }, + { + "name": "finalize_func_other_args", + "typeListAttr": "Tfinalize_func_args" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that invokes a function to generate elements." 
+ } + }, + { + "name": "GetSessionHandle", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The tensor to be stored.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The handle for the tensor stored in the session state, represented\nas a string.", + "name": "handle", + "type": 7 + } + ], + "summary": "Store the input tensor in the state of the current session." + } + }, + { + "name": "GetSessionHandleV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The tensor to be stored.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The handle for the tensor stored in the session state, represented\nas a ResourceHandle object.", + "name": "handle", + "type": 20 + } + ], + "summary": "Store the input tensor in the state of the current session." + } + }, + { + "name": "GetSessionTensor", + "schema": { + "attributes": [ + { + "description": "The type of the output value.", + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "description": "The handle for a tensor stored in the session state.", + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "The tensor for the given handle.", + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Get the value of the tensor specified by its handle." + } + }, + { + "name": "Greater", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Greater` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6])\ny = tf.constant([5, 2, 5])\ntf.math.greater(x, y) ==> [False, True, True]\n\nx = tf.constant([5, 4, 6])\ny = tf.constant([5])\ntf.math.greater(x, y) ==> [False, False, True]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of (x > y) element-wise." + } + }, + { + "name": "GreaterEqual", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6, 7])\ny = tf.constant([5, 2, 5, 10])\ntf.math.greater_equal(x, y) ==> [True, True, True, False]\n\nx = tf.constant([5, 4, 6, 7])\ny = tf.constant([5])\ntf.math.greater_equal(x, y) ==> [True, False, True, True]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of (x >= y) element-wise." 
+ } + }, + { + "name": "GroupByReducerDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64.", + "name": "key_func", + "type": "function" + }, + { + "description": "A function mapping a key of type DT_INT64, concatenated with\n`init_func_other_arguments` to the initial reducer state.", + "name": "init_func", + "type": "function" + }, + { + "description": "A function mapping the current reducer state and an element of `input_dataset`,\nconcatenated with `reduce_func_other_arguments` to a new reducer state.", + "name": "reduce_func", + "type": "function" + }, + { + "description": "A function mapping the final reducer state to an output element.", + "name": "finalize_func", + "type": "function" + }, + { + "minimum": 0, + "name": "Tkey_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Tinit_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Treduce_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Tfinalize_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Creates a dataset that computes a group-by on `input_dataset`.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `key_func`.", + "name": "key_func_other_arguments", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `init_func`.", + "name": "init_func_other_arguments", + "typeListAttr": "Tinit_func_other_arguments" + }, + { + "description": "A list 
of tensors, typically values that were captured when\nbuilding a closure for `reduce_func`.", + "name": "reduce_func_other_arguments", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `finalize_func`.", + "name": "finalize_func_other_arguments", + "typeListAttr": "Tfinalize_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that computes a group-by on `input_dataset`." + } + }, + { + "name": "GroupByWindowDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping an element of `input_dataset`, concatenated\nwith `key_func_other_arguments` to a scalar value of type DT_INT64.", + "name": "key_func", + "type": "function" + }, + { + "name": "reduce_func", + "type": "function" + }, + { + "name": "window_size_func", + "type": "function" + }, + { + "minimum": 0, + "name": "Tkey_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Treduce_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Twindow_size_func_other_arguments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "// TODO(mrry): Support non-int64 keys.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "key_func_other_arguments", + "typeListAttr": "Tkey_func_other_arguments" + }, + { + "name": "reduce_func_other_arguments", + "typeListAttr": "Treduce_func_other_arguments" + }, + { + "name": "window_size_func_other_arguments", + "typeListAttr": "Twindow_size_func_other_arguments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that computes a windowed group-by on `input_dataset`." 
+ } + }, + { + "name": "GuaranteeConst", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "The runtime is then free to make optimizations based on this.\n\nOnly accepts value typed tensors as inputs and rejects resource variable handles\nas input.\n\nReturns the input tensor without modification.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Gives a guarantee to the TF runtime that the input tensor is a constant." + } + }, + { + "name": "HSVToRGB", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "Outputs a tensor of the same shape as the `images` tensor, containing the RGB\nvalue of the pixels. The output is only well defined if the value in `images`\nare in `[0,1]`.\n\nSee `rgb_to_hsv` for a description of the HSV encoding.", + "inputs": [ + { + "description": "1-D or higher rank. HSV data to convert. Last dimension must be size 3.", + "name": "images", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "`images` converted to RGB.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Convert one or more images from HSV to RGB." 
+ } + }, + { + "name": "HashTable", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + } + ], + "description": "This op creates a hash table, specifying the type of its keys and values.\nBefore using the table you will have to initialize it. After initialization the\ntable will be immutable.", + "outputs": [ + { + "description": "Handle to a table.", + "isRef": true, + "name": "table_handle", + "type": 7 + } + ], + "summary": "Creates a non-initialized hash table." 
+ } + }, + { + "name": "HashTableV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + } + ], + "description": "This op creates a hash table, specifying the type of its keys and values.\nBefore using the table you will have to initialize it. After initialization the\ntable will be immutable.", + "outputs": [ + { + "description": "Handle to a table.", + "name": "table_handle", + "type": 20 + } + ], + "summary": "Creates a non-initialized hash table." + } + }, + { + "name": "HistogramFixedWidth", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "Given the tensor `values`, this operation returns a rank 1 histogram counting\nthe number of entries in `values` that fall into every bin. 
The bins are\nequal width and determined by the arguments `value_range` and `nbins`.\n\n```python\n# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)\nnbins = 5\nvalue_range = [0.0, 5.0]\nnew_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]\n\nwith tf.get_default_session() as sess:\n hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)\n variables.global_variables_initializer().run()\n sess.run(hist) => [2, 1, 1, 0, 2]\n```", + "inputs": [ + { + "description": "Numeric `Tensor`.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "Shape [2] `Tensor` of same `dtype` as `values`.\nvalues <= value_range[0] will be mapped to hist[0],\nvalues >= value_range[1] will be mapped to hist[-1].", + "name": "value_range", + "typeAttr": "T" + }, + { + "description": "Scalar `int32 Tensor`. Number of histogram bins.", + "name": "nbins", + "type": 3 + } + ], + "outputs": [ + { + "description": "A 1-D `Tensor` holding histogram of values.", + "name": "out", + "typeAttr": "dtype" + } + ], + "summary": "Return histogram of values." + } + }, + { + "name": "HistogramSummary", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The generated\n[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\nhas one summary value containing a histogram for `values`.\n\nThis op reports an `InvalidArgument` error if any value is not finite.", + "inputs": [ + { + "description": "Scalar. Tag to use for the `Summary.Value`.", + "name": "tag", + "type": 7 + }, + { + "description": "Any shape. Values to use to build the histogram.", + "name": "values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Scalar. 
Serialized `Summary` protocol buffer.", + "name": "summary", + "type": 7 + } + ], + "summary": "Outputs a `Summary` protocol buffer with a histogram." + } + }, + { + "name": "HostConst", + "schema": { + "attributes": [ + { + "description": "Attr `value` is the tensor to return.", + "name": "value", + "type": "tensor" + }, + { + "name": "dtype", + "type": "type" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Returns a constant tensor on the host. Only for writing C++ tests." + } + }, + { + "name": "IFFT", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the inverse 1-dimensional discrete Fourier transform over the\ninner-most dimension of `input`.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "description": "A complex tensor of the same shape as `input`. The inner-most\n dimension of `input` is replaced with its inverse 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "Inverse fast Fourier transform." + } + }, + { + "name": "IFFT2D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the inverse 2-dimensional discrete Fourier transform over the\ninner-most 2 dimensions of `input`.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "description": "A complex tensor of the same shape as `input`. 
The inner-most 2\n dimensions of `input` are replaced with their inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifft2\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "Inverse 2D fast Fourier transform." + } + }, + { + "name": "IFFT3D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the inverse 3-dimensional discrete Fourier transform over the\ninner-most 3 dimensions of `input`.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + } + ], + "outputs": [ + { + "description": "A complex tensor of the same shape as `input`. The inner-most 3\n dimensions of `input` are replaced with their inverse 3D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.ifftn with 3 dimensions.\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "Inverse 3D fast Fourier transform." + } + }, + { + "name": "IRFFT", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Treal", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the inverse 1-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most dimension of `input`.\n\nThe inner-most dimension of `input` is assumed to be the result of `RFFT`: the\n`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If\n`fft_length` is not provided, it is computed from the size of the inner-most\ndimension of `input` (`fft_length = 2 * (inner - 1)`). 
If the FFT length used to\ncompute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller\nthan the corresponding dimension of `input`, the dimension is cropped. If it is\nlarger, the dimension is padded with zeros.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + }, + { + "description": "An int32 tensor of shape [1]. The FFT length.", + "name": "fft_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A float32 tensor of the same rank as `input`. The inner-most\n dimension of `input` is replaced with the `fft_length` samples of its inverse\n 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft\n@end_compatibility", + "name": "output", + "typeAttr": "Treal" + } + ], + "summary": "Inverse real-valued fast Fourier transform." + } + }, + { + "name": "IRFFT2D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Treal", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the inverse 2-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most 2 dimensions of `input`.\n\nThe inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:\nThe inner-most dimension contains the `fft_length / 2 + 1` unique components of\nthe DFT of a real-valued signal. If `fft_length` is not provided, it is computed\nfrom the size of the inner-most 2 dimensions of `input`. 
If the FFT length used\nto compute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong each axis `IRFFT2D` is computed on, if `fft_length` (or\n`fft_length / 2 + 1` for the inner-most dimension) is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + }, + { + "description": "An int32 tensor of shape [2]. The FFT length for each dimension.", + "name": "fft_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A float32 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 2D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.irfft2\n@end_compatibility", + "name": "output", + "typeAttr": "Treal" + } + ], + "summary": "Inverse 2D real-valued fast Fourier transform." + } + }, + { + "name": "IRFFT3D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Treal", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the inverse 3-dimensional discrete Fourier transform of a real-valued\nsignal over the inner-most 3 dimensions of `input`.\n\nThe inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:\nThe inner-most dimension contains the `fft_length / 2 + 1` unique components of\nthe DFT of a real-valued signal. If `fft_length` is not provided, it is computed\nfrom the size of the inner-most 3 dimensions of `input`. 
If the FFT length used\nto compute `input` is odd, it should be provided since it cannot be inferred\nproperly.\n\nAlong each axis `IRFFT3D` is computed on, if `fft_length` (or\n`fft_length / 2 + 1` for the inner-most dimension) is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "inputs": [ + { + "description": "A complex tensor.", + "name": "input", + "typeAttr": "Tcomplex" + }, + { + "description": "An int32 tensor of shape [3]. The FFT length for each dimension.", + "name": "fft_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A float32 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with the `fft_length` samples of their\n inverse 3D real Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.irfftn with 3 dimensions.\n@end_compatibility", + "name": "output", + "typeAttr": "Treal" + } + ], + "summary": "Inverse 3D real-valued fast Fourier transform." + } + }, + { + "name": "Identity", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "category": "Control", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Return a tensor with the same shape and contents as the input tensor or value." + } + }, + { + "name": "IdentityN", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "T", + "type": "type[]" + } + ], + "description": "tensors.\n\nThis op can be used to override the gradient for complicated functions. For\nexample, suppose y = f(x) and we wish to apply a custom function g for backprop\nsuch that dx = g(dy). 
In Python,\n\n```python\nwith tf.get_default_graph().gradient_override_map(\n {'IdentityN': 'OverrideGradientWithG'}):\n y, _ = identity_n([f(x), x])\n\n@tf.RegisterGradient('OverrideGradientWithG')\ndef ApplyG(op, dy, _):\n return [None, g(dy)] # Do not backprop to f(x).\n```", + "inputs": [ + { + "name": "input", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeListAttr": "T" + } + ], + "summary": "Returns a list of tensors with the same shapes and contents as the input" + } + }, + { + "name": "IdentityReader", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "description": "To use, enqueue strings in a Queue. ReaderRead will take the front\nwork string and output (work, work).", + "outputs": [ + { + "description": "The handle to reference the Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "summary": "A Reader that outputs the queued work as both the key and value." + } + }, + { + "name": "IdentityReaderV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "description": "To use, enqueue strings in a Queue. 
ReaderRead will take the front\nwork string and output (work, work).", + "outputs": [ + { + "description": "The handle to reference the Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "summary": "A Reader that outputs the queued work as both the key and value." + } + }, + { + "name": "If", + "schema": { + "attributes": [ + { + "name": "Tcond", + "type": "type" + }, + { + "description": "A list of input types.", + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "description": "A list of output types.", + "minimum": 0, + "name": "Tout", + "type": "type[]" + }, + { + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.", + "name": "then_branch", + "type": "function" + }, + { + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns.", + "name": "else_branch", + "type": "function" + }, + { + "default": [], + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": " A Tensor. If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means False; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means False and being non-empty means True.", + "name": "cond", + "typeAttr": "Tcond" + }, + { + "description": "A list of input tensors.", + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "A list of return values.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "output = cond ? 
then_branch(input) : else_branch(input)" + } + }, + { + "name": "Igamma", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "The lower regularized incomplete Gamma function is defined as:\n\n\n\\\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\\\)\n\nwhere\n\n\\\\(gamma(a, x) = \\\\int_{0}^{x} t^{a-1} exp(-t) dt\\\\)\n\nis the lower incomplete Gamma function.\n\nNote, above `Q(a, x)` (`Igammac`) is the upper regularized complete\nGamma function.", + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Compute the lower regularized incomplete Gamma function `P(a, x)`." + } + }, + { + "name": "IgammaGradA", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of `igamma(a, x)` wrt `a`." 
+ } + }, + { + "name": "Igammac", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "The upper regularized incomplete Gamma function is defined as:\n\n\\\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\\\)\n\nwhere\n\n\\\\(Gamma(a, x) = int_{x}^{\\infty} t^{a-1} exp(-t) dt\\\\)\n\nis the upper incomplete Gama function.\n\nNote, above `P(a, x)` (`Igamma`) is the lower regularized complete\nGamma function.", + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Compute the upper regularized incomplete Gamma function `Q(a, x)`." + } + }, + { + "name": "IgnoreErrorsDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that contains the elements of `input_dataset` ignoring errors." + } + }, + { + "name": "Imag", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Tout", + "type": "type" + } + ], + "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the imaginary part of each element in `input`. 
All\nelements in `input` must be complex numbers of the form \\\\(a + bj\\\\), where *a*\nis the real part and *b* is the imaginary part returned by this operation.\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.imag(input) ==> [4.75, 5.75]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tout" + } + ], + "summary": "Returns the imaginary part of a complex number." + } + }, + { + "name": "ImageProjectiveTransformV2", + "schema": { + "attributes": [ + { + "description": "Input dtype. Must be one of the following: `uint8`, `int32`, `int64`, `float16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Interpolation method, \"NEAREST\" or \"BILINEAR\".", + "name": "interpolation", + "type": "string" + }, + { + "default": "CONSTANT", + "description": "Fill mode, \"REFLECT\", \"WRAP\", or \"CONSTANT\".", + "name": "fill_mode", + "type": "string" + } + ], + "description": "If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps\nthe *output* point `(x, y)` to a transformed *input* point\n`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where\n`k = c0 x + c1 y + 1`. If the transformed point lays outside of the input\nimage, the output pixel is set to 0.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "images", + "typeAttr": "dtype" + }, + { + "description": "2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3\nprojective transformation matrix, with the last entry assumed to be 1. 
If there\nis one row, the same transformation will be applied to all images.", + "name": "transforms", + "type": 1 + }, + { + "description": "1-D Tensor [new_height, new_width].", + "name": "output_shape", + "type": 3 + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "name": "transformed_images", + "typeAttr": "dtype" + } + ], + "summary": "Applies the given transform to each of the images." + } + }, + { + "name": "ImageSummary", + "schema": { + "attributes": [ + { + "default": 3, + "description": "Max number of batch elements to generate images for.", + "minimum": 1, + "name": "max_images", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `uint8`, `float32`, `float16`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "tensor", + "value": "?" + }, + "description": "Color to use for pixels with non-finite values.", + "name": "bad_color", + "type": "tensor" + } + ], + "description": "The summary has up to `max_images` summary values containing images. The\nimages are built from `tensor` which must be 4-D with shape `[batch_size,\nheight, width, channels]` and where `channels` can be:\n\n* 1: `tensor` is interpreted as Grayscale.\n* 3: `tensor` is interpreted as RGB.\n* 4: `tensor` is interpreted as RGBA.\n\nThe images have the same number of channels as the input tensor. For float\ninput, the values are normalized one image at a time to fit in the range\n`[0, 255]`. `uint8` values are unchanged. The op uses two different\nnormalization algorithms:\n\n* If the input values are all positive, they are rescaled so the largest one\n is 255.\n\n* If any input value is negative, the values are shifted so input value 0.0\n is at 127. They are then rescaled so that either the smallest value is 0,\n or the largest one is 255.\n\nThe `tag` argument is a scalar `Tensor` of type `string`. 
It is used to\nbuild the `tag` of the summary values:\n\n* If `max_images` is 1, the summary value tag is '*tag*/image'.\n* If `max_images` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.\n\nThe `bad_color` argument is the color to use in the generated images for\nnon-finite input values. It is a `uint8` 1-D tensor of length `channels`.\nEach element must be in the range `[0, 255]` (It represents the value of a\npixel in the output image). Non-finite values in the input tensor are\nreplaced by this tensor in the output image. The default value is the color\nred.", + "inputs": [ + { + "description": "Scalar. Used to build the `tag` attribute of the summary values.", + "name": "tag", + "type": 7 + }, + { + "description": "4-D of shape `[batch_size, height, width, channels]` where\n`channels` is 1, 3, or 4.", + "name": "tensor", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Scalar. Serialized `Summary` protocol buffer.", + "name": "summary", + "type": 7 + } + ], + "summary": "Outputs a `Summary` protocol buffer with images." + } + }, + { + "name": "ImmutableConst", + "schema": { + "attributes": [ + { + "description": "Type of the returned tensor.", + "name": "dtype", + "type": "type" + }, + { + "description": "Shape of the returned tensor.", + "name": "shape", + "type": "shape" + }, + { + "description": "Name of readonly memory region used by the tensor, see\nNewReadOnlyMemoryRegionFromFile in tensorflow::Env.", + "name": "memory_region_name", + "type": "string" + } + ], + "description": "The current implementation memmaps the tensor from a file.", + "outputs": [ + { + "name": "tensor", + "typeAttr": "dtype" + } + ], + "summary": "Returns immutable tensor from memory region." 
+ } + }, + { + "name": "ImportEvent", + "schema": { + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "event", + "type": 7 + } + ] + } + }, + { + "name": "InTopK", + "schema": { + "attributes": [ + { + "description": "Number of top elements to look at for computing precision.", + "name": "k", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\nprediction for the target class is among the top `k` predictions among\nall predictions for example `i`. Note that the behavior of `InTopK` differs\nfrom the `TopK` op in its handling of ties; if multiple classes have the\nsame prediction value and straddle the top-`k` boundary, all of those\nclasses are considered to be in the top `k`.\n\nMore formally, let\n\n \\\\(predictions_i\\\\) be the predictions for all classes for example `i`,\n \\\\(targets_i\\\\) be the target class for example `i`,\n \\\\(out_i\\\\) be the output for example `i`,\n\n$$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$", + "inputs": [ + { + "description": "A `batch_size` x `classes` tensor.", + "name": "predictions", + "type": 1 + }, + { + "description": "A `batch_size` vector of class ids.", + "name": "targets", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Computed Precision at `k` as a `bool Tensor`.", + "name": "precision", + "type": 10 + } + ], + "summary": "Says whether the targets are in the top `K` predictions." 
+ } + }, + { + "name": "InTopKV2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the\nprediction for the target class is among the top `k` predictions among\nall predictions for example `i`. Note that the behavior of `InTopK` differs\nfrom the `TopK` op in its handling of ties; if multiple classes have the\nsame prediction value and straddle the top-`k` boundary, all of those\nclasses are considered to be in the top `k`.\n\nMore formally, let\n\n \\\\(predictions_i\\\\) be the predictions for all classes for example `i`,\n \\\\(targets_i\\\\) be the target class for example `i`,\n \\\\(out_i\\\\) be the output for example `i`,\n\n$$out_i = predictions_{i, targets_i} \\in TopKIncludingTies(predictions_i)$$", + "inputs": [ + { + "description": "A `batch_size` x `classes` tensor.", + "name": "predictions", + "type": 1 + }, + { + "description": "A `batch_size` vector of class ids.", + "name": "targets", + "typeAttr": "T" + }, + { + "description": "Number of top elements to look at for computing precision.", + "name": "k", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Computed precision at `k` as a `bool Tensor`.", + "name": "precision", + "type": 10 + } + ], + "summary": "Says whether the targets are in the top `K` predictions." 
+ } + }, + { + "name": "InfeedDequeue", + "schema": { + "attributes": [ + { + "description": "The type of elements in the tensor.", + "name": "dtype", + "type": "type" + }, + { + "description": "The shape of the tensor.", + "name": "shape", + "type": "shape" + } + ], + "outputs": [ + { + "description": "A tensor that will be provided using the infeed mechanism.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "A placeholder op for a value that will be fed into the computation." + } + }, + { + "name": "InfeedDequeueTuple", + "schema": { + "attributes": [ + { + "description": "The element types of each element in `outputs`.", + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "description": "The shapes of each tensor in `outputs`.", + "name": "shapes", + "type": "shape[]" + } + ], + "outputs": [ + { + "description": "A list of tensors that will be provided using the infeed mechanism.", + "name": "outputs", + "typeListAttr": "dtypes" + } + ], + "summary": "Fetches multiple values from infeed as an XLA tuple." + } + }, + { + "name": "InfeedEnqueue", + "schema": { + "attributes": [ + { + "description": "The type of elements in the tensor.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "description": "The shape of the tensor.", + "name": "shape", + "type": "shape" + }, + { + "default": [], + "description": "A vector holding the requested layout in minor-to-major sequence.\nIf a layout attribute is passed, but its values are all -1, the layout will\nbe computed by the infeed operation.", + "name": "layout", + "type": "int64[]" + }, + { + "default": -1, + "description": "The TPU device to use. 
This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "name": "device_ordinal", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A tensor that will be provided using the infeed mechanism.", + "name": "input", + "typeAttr": "dtype" + } + ], + "summary": "An op which feeds a single Tensor value into the computation." + } + }, + { + "name": "InfeedEnqueuePrelinearizedBuffer", + "schema": { + "attributes": [ + { + "default": -1, + "description": "The TPU device to use. This should be -1 when the Op is running on a TPU device\nand = 0 when the Op is running on the CPU device.", + "name": "device_ordinal", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A variant tensor representing linearized output.", + "name": "input", + "type": 21 + } + ], + "summary": "An op which enqueues prelinearized buffer into TPU infeed." + } + }, + { + "name": "InfeedEnqueueTuple", + "schema": { + "attributes": [ + { + "description": "The element types of each element in `inputs`.", + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "description": "The shapes of each tensor in `inputs`.", + "name": "shapes", + "type": "shape[]" + }, + { + "default": [], + "description": "A vector holding the requested layout in minor-to-major sequence for\nall the tuple shapes, in the order the shapes appear in the \"shapes\" input.\nThe layout elements for a sub-shape can be set to -1, in which case the\ncorresponding layout will be computed by the infeed operation.", + "name": "layouts", + "type": "int64[]" + }, + { + "default": -1, + "description": "The TPU device to use. 
This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "name": "device_ordinal", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A list of tensors that will be provided using the infeed mechanism.", + "name": "inputs", + "typeListAttr": "dtypes" + } + ], + "summary": "Feeds multiple Tensor values into the computation as an XLA tuple." + } + }, + { + "name": "InitializeTable", + "schema": { + "attributes": [ + { + "name": "Tkey", + "type": "type" + }, + { + "name": "Tval", + "type": "type" + } + ], + "inputs": [ + { + "description": "Handle to a table which will be initialized.", + "isRef": true, + "name": "table_handle", + "type": 7 + }, + { + "description": "Keys of type Tkey.", + "name": "keys", + "typeAttr": "Tkey" + }, + { + "description": "Values of type Tval.", + "name": "values", + "typeAttr": "Tval" + } + ], + "summary": "Table initializer that takes two tensors for keys and values respectively." + } + }, + { + "name": "InitializeTableFromDataset", + "schema": { + "inputs": [ + { + "name": "table_handle", + "type": 20 + }, + { + "name": "dataset", + "type": 21 + } + ] + } + }, + { + "name": "InitializeTableFromTextFile", + "schema": { + "attributes": [ + { + "description": "Column index in a line to get the table `key` values from.", + "minimum": -2, + "name": "key_index", + "type": "int64" + }, + { + "description": "Column index that represents information of a line to get the table\n`value` values from.", + "minimum": -2, + "name": "value_index", + "type": "int64" + }, + { + "default": -1, + "description": "Number of elements of the file, use -1 if unknown.", + "minimum": -1, + "name": "vocab_size", + "type": "int64" + }, + { + "default": "\t", + "description": "Delimiter to separate fields in a line.", + "name": "delimiter", + "type": "string" + } + ], + "description": "It inserts one key-value pair into the table for each line of the file.\nThe key and value is extracted from the 
whole line content, elements from the\nsplit line based on `delimiter` or the line number (starting from zero).\nWhere to extract the key and value from a line is specified by `key_index` and\n`value_index`.\n\n- A value of -1 means use the line number(starting from zero), expects `int64`.\n- A value of -2 means use the whole line content, expects `string`.\n- A value >= 0 means use the index (starting at zero) of the split line based\n on `delimiter`.", + "inputs": [ + { + "description": "Handle to a table which will be initialized.", + "isRef": true, + "name": "table_handle", + "type": 7 + }, + { + "description": "Filename of a vocabulary text file.", + "name": "filename", + "type": 7 + } + ], + "summary": "Initializes a table from a text file." + } + }, + { + "name": "InitializeTableFromTextFileV2", + "schema": { + "attributes": [ + { + "description": "Column index in a line to get the table `key` values from.", + "minimum": -2, + "name": "key_index", + "type": "int64" + }, + { + "description": "Column index that represents information of a line to get the table\n`value` values from.", + "minimum": -2, + "name": "value_index", + "type": "int64" + }, + { + "default": -1, + "description": "Number of elements of the file, use -1 if unknown.", + "minimum": -1, + "name": "vocab_size", + "type": "int64" + }, + { + "default": "\t", + "description": "Delimiter to separate fields in a line.", + "name": "delimiter", + "type": "string" + } + ], + "description": "It inserts one key-value pair into the table for each line of the file.\nThe key and value is extracted from the whole line content, elements from the\nsplit line based on `delimiter` or the line number (starting from zero).\nWhere to extract the key and value from a line is specified by `key_index` and\n`value_index`.\n\n- A value of -1 means use the line number(starting from zero), expects `int64`.\n- A value of -2 means use the whole line content, expects `string`.\n- A value >= 0 means use the index (starting 
at zero) of the split line based\n on `delimiter`.", + "inputs": [ + { + "description": "Handle to a table which will be initialized.", + "name": "table_handle", + "type": 20 + }, + { + "description": "Filename of a vocabulary text file.", + "name": "filename", + "type": 7 + } + ], + "summary": "Initializes a table from a text file." + } + }, + { + "name": "InitializeTableV2", + "schema": { + "attributes": [ + { + "name": "Tkey", + "type": "type" + }, + { + "name": "Tval", + "type": "type" + } + ], + "inputs": [ + { + "description": "Handle to a table which will be initialized.", + "name": "table_handle", + "type": 20 + }, + { + "description": "Keys of type Tkey.", + "name": "keys", + "typeAttr": "Tkey" + }, + { + "description": "Values of type Tval.", + "name": "values", + "typeAttr": "Tval" + } + ], + "summary": "Table initializer that takes two tensors for keys and values respectively." + } + }, + { + "name": "InplaceAdd", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "A `Tensor` of type T.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A vector. Indices into the left-most dimension of `x`.", + "name": "i", + "type": 3 + }, + { + "description": "A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.", + "name": "v", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": " Adds v into specified rows of x.\n\n Computes y = x; y[i, :] += v; return y." + } + }, + { + "name": "InplaceSub", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "A `Tensor` of type T.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A vector. 
Indices into the left-most dimension of `x`.", + "name": "i", + "type": 3 + }, + { + "description": "A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.", + "name": "v", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": " Subtracts `v` into specified rows of `x`.\n\n Computes y = x; y[i, :] -= v; return y." + } + }, + { + "name": "InplaceUpdate", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Computes `x[i, :] = v; return x`.\n\nOriginally this function is mutative however for compilation we make this\noperation create / operate on a copy of `x`.", + "inputs": [ + { + "description": "A tensor of type `T`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A vector. Indices into the left-most dimension of `x`.", + "name": "i", + "type": 3 + }, + { + "description": "A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.", + "name": "v", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Updates specified rows 'i' with values 'v'." 
+ } + }, + { + "name": "InterleaveDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Unlike MapDataset, the `f` in InterleaveDataset is expected to return\na Dataset variant, and InterleaveDataset will flatten successive\nresults into a single Dataset. Unlike FlatMapDataset,\nInterleaveDataset will interleave sequences of up to `block_length`\nconsecutive elements from `cycle_length` input elements.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "type": 9 + }, + { + "name": "block_length", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "Inv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = 1 / x\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the reciprocal of x element-wise." 
+ } + }, + { + "name": "InvGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`\nis the corresponding input gradient.", + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient for the inverse of `x` wrt its input." + } + }, + { + "name": "Invert", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.\nThis operation is performed on each element of the tensor argument `x`.\n\nExample:\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\n\n# flip 2 (00000010) to -3 (11111101)\ntf.assert_equal(-3, bitwise_ops.invert(2))\n\ndtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,\n dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]\n\ninputs = [0, 5, 3, 14]\nfor dtype in dtype_list:\n # Because of issues with negative numbers, let's test this indirectly.\n # 1. invert(a) and a = 0\n # 2. 
invert(a) or a = invert(0)\n input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)\n not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(\n input_tensor, bitwise_ops.invert(input_tensor)),\n bitwise_ops.bitwise_or(\n input_tensor, bitwise_ops.invert(input_tensor)),\n bitwise_ops.invert(\n tf.constant(0, dtype=dtype))]\n\n expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)\n tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)\n\n expected = tf.cast([not_0] * 4, tf.float32)\n tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)\n\n # For unsigned dtypes let's also check the result directly.\n if dtype.is_unsigned:\n inverted = bitwise_ops.invert(input_tensor)\n expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)\n tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010." + } + }, + { + "name": "InvertPermutation", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "This operation computes the inverse of an index permutation. It takes a 1-D\ninteger tensor `x`, which represents the indices of a zero-based array, and\nswaps each value with its index position. In other words, for an output tensor\n`y` and an input tensor `x`, this operation computes the following:\n\n`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`\n\nThe values must include 0. 
There can be no duplicate values or negative values.\n\nFor example:\n\n```\n# tensor `x` is [3, 4, 0, 2, 1]\ninvert_permutation(x) ==> [2, 4, 3, 0, 1]\n```", + "inputs": [ + { + "description": "1-D.", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the inverse permutation of a tensor." + } + }, + { + "name": "IsBoostedTreesEnsembleInitialized", + "schema": { + "inputs": [ + { + "description": "Handle to the tree ensemble resource.", + "name": "tree_ensemble_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "output boolean on whether it is initialized or not.", + "name": "is_initialized", + "type": 10 + } + ], + "summary": "Checks whether a tree ensemble has been initialized." + } + }, + { + "name": "IsBoostedTreesQuantileStreamResourceInitialized", + "schema": { + "description": "An Op that checks if quantile stream resource is initialized.", + "inputs": [ + { + "description": "resource; The reference to quantile stream resource handle.", + "name": "quantile_stream_resource_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "bool; True if the resource is initialized, False otherwise.", + "name": "is_initialized", + "type": 10 + } + ], + "summary": "Checks whether a quantile stream has been initialized." + } + }, + { + "name": "IsFinite", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "@compatibility(numpy)\nEquivalent to np.isfinite\n@end_compatibility\n\nExample:\n\n```python\nx = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])\ntf.math.is_finite(x) ==> [True, True, True, False, False]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 10 + } + ], + "summary": "Returns which elements of x are finite." 
+ } + }, + { + "name": "IsInf", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "@compatibility(numpy)\nEquivalent to np.isinf\n@end_compatibility\n\nExample:\n\n```python\nx = tf.constant([5.0, np.inf, 6.8, np.inf])\ntf.math.is_inf(x) ==> [False, True, False, True]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 10 + } + ], + "summary": "Returns which elements of x are Inf." + } + }, + { + "name": "IsNan", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "@compatibility(numpy)\nEquivalent to np.isnan\n@end_compatibility\n\nExample:\n\n```python\nx = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])\ntf.math.is_nan(x) ==> [False, True, False, True, False]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 10 + } + ], + "summary": "Returns which elements of x are NaN." + } + }, + { + "name": "IsVariableInitialized", + "schema": { + "attributes": [ + { + "description": "The type of elements in the variable tensor.", + "name": "dtype", + "type": "type" + } + ], + "description": "Outputs boolean scalar indicating whether the tensor has been initialized.", + "inputs": [ + { + "description": "Should be from a `Variable` node. May be uninitialized.", + "isRef": true, + "name": "ref", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "is_initialized", + "type": 10 + } + ], + "summary": "Checks whether a tensor has been initialized." 
+ } + }, + { + "name": "Iterator", + "schema": { + "attributes": [ + { + "name": "shared_name", + "type": "string" + }, + { + "name": "container", + "type": "string" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "outputs": [ + { + "description": "A handle to the iterator that can be passed to a \"MakeIterator\"\nor \"IteratorGetNext\" op.", + "name": "handle", + "type": 20 + } + ], + "summary": "A container for an iterator resource." + } + }, + { + "name": "IteratorFromStringHandle", + "schema": { + "attributes": [ + { + "default": [], + "description": "If specified, defines the type of each tuple component in an\nelement produced by the resulting iterator.", + "minimum": 0, + "name": "output_types", + "type": "type[]" + }, + { + "default": [], + "description": "If specified, defines the shape of each tuple component in an\nelement produced by the resulting iterator.", + "minimum": 0, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A string representation of the given handle.", + "name": "string_handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "A handle to an iterator resource.", + "name": "resource_handle", + "type": 20 + } + ], + "summary": "Converts the given string representing a handle to an iterator to a resource." 
+ } + }, + { + "name": "IteratorFromStringHandleV2", + "schema": { + "attributes": [ + { + "default": [], + "minimum": 0, + "name": "output_types", + "type": "type[]" + }, + { + "default": [], + "minimum": 0, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "string_handle", + "type": 7 + } + ], + "outputs": [ + { + "name": "resource_handle", + "type": 20 + } + ] + } + }, + { + "name": "IteratorGetDevice", + "schema": { + "inputs": [ + { + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "name": "device", + "type": 7 + } + ], + "summary": "Returns the name of the device on which `resource` has been placed." + } + }, + { + "name": "IteratorGetNext", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ], + "summary": "Gets the next output from the given iterator ." + } + }, + { + "name": "IteratorGetNextAsOptional", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "optional", + "type": 21 + } + ], + "summary": "Gets the next output from the given iterator as an Optional variant." + } + }, + { + "name": "IteratorGetNextSync", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "This operation is a synchronous version IteratorGetNext. 
It should only be used\nin situations where the iterator does not block the calling thread, or where\nthe calling thread is not a member of the thread pool used to execute parallel\noperations (e.g. in eager mode).", + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ], + "summary": "Gets the next output from the given iterator." + } + }, + { + "name": "IteratorToStringHandle", + "schema": { + "inputs": [ + { + "description": "A handle to an iterator resource.", + "name": "resource_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "A string representation of the given handle.", + "name": "string_handle", + "type": 7 + } + ], + "summary": "Converts the given `resource_handle` representing an iterator to a string." + } + }, + { + "name": "IteratorV2", + "schema": { + "attributes": [ + { + "name": "shared_name", + "type": "string" + }, + { + "name": "container", + "type": "string" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + } + }, + { + "name": "KMC2ChainInitialization", + "schema": { + "description": "Entries in distances are assumed to be squared distances of candidate points to\nthe already sampled centers in the seed set. The op constructs one Markov chain\nof the k-MC^2 algorithm and returns the index of one candidate point to be added\nas an additional cluster center.", + "inputs": [ + { + "description": "Vector with squared distances to the closest previously sampled cluster center\nfor each candidate point.", + "name": "distances", + "type": 1 + }, + { + "description": "Scalar. 
Seed for initializing the random number generator.", + "name": "seed", + "type": 9 + } + ], + "outputs": [ + { + "description": "Scalar with the index of the sampled point.", + "name": "index", + "type": 9 + } + ], + "summary": "Returns the index of a data point that should be added to the seed set." + } + }, + { + "name": "KmeansPlusPlusInitialization", + "schema": { + "description": "Rows of points are assumed to be input points. One row is selected at random.\nSubsequent rows are sampled with probability proportional to the squared L2\ndistance from the nearest row selected thus far till num_to_sample rows have\nbeen sampled.", + "inputs": [ + { + "description": "Matrix of shape (n, d). Rows are assumed to be input points.", + "name": "points", + "type": 1 + }, + { + "description": "Scalar. The number of rows to sample. This value must not be larger than n.", + "name": "num_to_sample", + "type": 9 + }, + { + "description": "Scalar. Seed for initializing the random number generator.", + "name": "seed", + "type": 9 + }, + { + "description": "Scalar. For each row that is sampled, this parameter\nspecifies the number of additional points to draw from the current\ndistribution before selecting the best. If a negative value is specified, a\nheuristic is used to sample O(log(num_to_sample)) additional points.", + "name": "num_retries_per_sample", + "type": 9 + } + ], + "outputs": [ + { + "description": "Matrix of shape (num_to_sample, d). The sampled rows.", + "name": "samples", + "type": 1 + } + ], + "summary": "Selects num_to_sample rows of input using the KMeans++ criterion." 
+ } + }, + { + "name": "L2Loss", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "Computes half the L2 norm of a tensor without the `sqrt`:\n\n output = sum(t ** 2) / 2", + "inputs": [ + { + "description": "Typically 2-D, but may have any dimensions.", + "name": "t", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "0-D.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "L2 Loss." + } + }, + { + "name": "LMDBDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary\nkey-value database. This dataset can read the contents of LMDB database files,\nthe names of which generally have the `.mdb` suffix.\n\nEach output element consists of a key-value pair represented as a pair of\nscalar string `Tensor`s, where the first `Tensor` contains the key and the\nsecond `Tensor` contains the value.\n\nLMDB uses different file formats on big- and little-endian machines.\n`LMDBDataset` can only read files in the format of the host machine.", + "inputs": [ + { + "description": "A scalar or a vector containing the name(s) of the binary file(s) to be\nread.", + "name": "filenames", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that emits the key-value pairs in one or more LMDB files." 
+ } + }, + { + "name": "LMDBReader", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to reference the Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "summary": "A Reader that outputs the records from a LMDB file." + } + }, + { + "name": "LRN", + "schema": { + "attributes": [ + { + "default": 5, + "description": "0-D. Half-width of the 1-D normalization window.", + "name": "depth_radius", + "type": "int64" + }, + { + "default": 1.0, + "description": "An offset (usually positive to avoid dividing by 0).", + "name": "bias", + "type": "float32" + }, + { + "default": 1.0, + "description": "A scale factor, usually positive.", + "name": "alpha", + "type": "float32" + }, + { + "default": 0.5, + "description": "An exponent.", + "name": "beta", + "type": "float32" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "category": "Normalization", + "description": "The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last\ndimension), and each vector is normalized independently. Within a given vector,\neach component is divided by the weighted, squared sum of inputs within\n`depth_radius`. 
In detail,\n\n sqr_sum[a, b, c, d] =\n sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)\n output = input / (bias + alpha * sqr_sum) ** beta\n\nFor details, see [Krizhevsky et al., ImageNet classification with deep\nconvolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).", + "inputs": [ + { + "description": "4-D.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Local Response Normalization." + } + }, + { + "name": "LRNGrad", + "schema": { + "attributes": [ + { + "default": 5, + "description": "A depth radius.", + "name": "depth_radius", + "type": "int64" + }, + { + "default": 1.0, + "description": "An offset (usually > 0 to avoid dividing by 0).", + "name": "bias", + "type": "float32" + }, + { + "default": 1.0, + "description": "A scale factor, usually positive.", + "name": "alpha", + "type": "float32" + }, + { + "default": 0.5, + "description": "An exponent.", + "name": "beta", + "type": "float32" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "input_grads", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "input_image", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "output_image", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradients for LRN.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Gradients for Local Response Normalization." 
+ } + }, + { + "name": "LSTMBlockCell", + "schema": { + "attributes": [ + { + "default": 1.0, + "description": "The forget gate bias.", + "name": "forget_bias", + "type": "float32" + }, + { + "default": 3.0, + "description": "Value to clip the 'cs' value to.", + "name": "cell_clip", + "type": "float32" + }, + { + "default": false, + "description": "Whether to use peephole weights.", + "name": "use_peephole", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "This implementation uses 1 weight matrix and 1 bias vector, and there's an\noptional peephole connection.\n\nThis kernel op implements the following mathematical equations:\n\n```python\nxh = [x, h_prev]\n[i, f, ci, o] = xh * w + b\nf = f + forget_bias\n\nif not use_peephole:\n wci = wcf = wco = 0\n\ni = sigmoid(cs_prev * wci + i)\nf = sigmoid(cs_prev * wcf + f)\nci = tanh(ci)\n\ncs = ci .* i + cs_prev .* f\ncs = clip(cs, cell_clip)\n\no = sigmoid(cs * wco + o)\nco = tanh(cs)\nh = co .* o\n```", + "inputs": [ + { + "description": "The input to the LSTM cell, shape (batch_size, num_inputs).", + "name": "x", + "typeAttr": "T" + }, + { + "description": "Value of the cell state at previous time step.", + "name": "cs_prev", + "typeAttr": "T" + }, + { + "description": "Output of the previous cell at previous time step.", + "name": "h_prev", + "typeAttr": "T" + }, + { + "description": "The weight matrix.", + "name": "w", + "typeAttr": "T" + }, + { + "description": "The weight matrix for input gate peephole connection.", + "name": "wci", + "typeAttr": "T" + }, + { + "description": "The weight matrix for forget gate peephole connection.", + "name": "wcf", + "typeAttr": "T" + }, + { + "description": "The weight matrix for output gate peephole connection.", + "name": "wco", + "typeAttr": "T" + }, + { + "description": "The bias vector.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": 
"The input gate.", + "name": "i", + "typeAttr": "T" + }, + { + "description": "The cell state before the tanh.", + "name": "cs", + "typeAttr": "T" + }, + { + "description": "The forget gate.", + "name": "f", + "typeAttr": "T" + }, + { + "description": "The output gate.", + "name": "o", + "typeAttr": "T" + }, + { + "description": "The cell input.", + "name": "ci", + "typeAttr": "T" + }, + { + "description": "The cell after the tanh.", + "name": "co", + "typeAttr": "T" + }, + { + "description": "The output h vector.", + "name": "h", + "typeAttr": "T" + } + ], + "summary": "Computes the LSTM cell forward propagation for 1 time step." + } + }, + { + "name": "LSTMBlockCellGrad", + "schema": { + "attributes": [ + { + "description": "Whether the cell uses peephole connections.", + "name": "use_peephole", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "This implementation is to be used in conjunction of LSTMBlockCell.", + "inputs": [ + { + "description": "The input to the LSTM cell, shape (batch_size, num_inputs).", + "name": "x", + "typeAttr": "T" + }, + { + "description": "The previous cell state.", + "name": "cs_prev", + "typeAttr": "T" + }, + { + "description": "The previous h state.", + "name": "h_prev", + "typeAttr": "T" + }, + { + "description": "The weight matrix.", + "name": "w", + "typeAttr": "T" + }, + { + "description": "The weight matrix for input gate peephole connection.", + "name": "wci", + "typeAttr": "T" + }, + { + "description": "The weight matrix for forget gate peephole connection.", + "name": "wcf", + "typeAttr": "T" + }, + { + "description": "The weight matrix for output gate peephole connection.", + "name": "wco", + "typeAttr": "T" + }, + { + "description": "The bias vector.", + "name": "b", + "typeAttr": "T" + }, + { + "description": "The input gate.", + "name": "i", + "typeAttr": "T" + }, + { + "description": "The cell state before the 
tanh.", + "name": "cs", + "typeAttr": "T" + }, + { + "description": "The forget gate.", + "name": "f", + "typeAttr": "T" + }, + { + "description": "The output gate.", + "name": "o", + "typeAttr": "T" + }, + { + "description": "The cell input.", + "name": "ci", + "typeAttr": "T" + }, + { + "description": "The cell after the tanh.", + "name": "co", + "typeAttr": "T" + }, + { + "description": "The current gradient of cs.", + "name": "cs_grad", + "typeAttr": "T" + }, + { + "description": "The gradient of h vector.", + "name": "h_grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradient of cs to be back-propped.", + "name": "cs_prev_grad", + "typeAttr": "T" + }, + { + "description": "The derivative wrt to [i, cs, f, o].", + "name": "dicfo", + "typeAttr": "T" + }, + { + "description": "The gradient for wci to be back-propped.", + "name": "wci_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wcf to be back-propped.", + "name": "wcf_grad", + "typeAttr": "T" + }, + { + "description": "The gradient for wco to be back-propped.", + "name": "wco_grad", + "typeAttr": "T" + } + ], + "summary": "Computes the LSTM cell backward propagation for 1 timestep." + } + }, + { + "name": "LatencyStatsDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "tag", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Records the latency of producing `input_dataset` elements in a StatsAggregator." 
+ } + }, + { + "name": "LeakyRelu", + "schema": { + "attributes": [ + { + "default": 0.20000000298023224, + "name": "alpha", + "type": "float32" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ], + "summary": "Computes rectified linear: `max(features, features * alpha)`." + } + }, + { + "name": "LeakyReluGrad", + "schema": { + "attributes": [ + { + "default": 0.20000000298023224, + "name": "alpha", + "type": "float32" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The backpropagated gradients to the corresponding LeakyRelu operation.", + "name": "gradients", + "typeAttr": "T" + }, + { + "description": "The features passed as input to the corresponding LeakyRelu operation,\nOR the outputs of that operation (both work equivalently).", + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "`gradients * (features > 0) + alpha * gradients * (features <= 0)`.", + "name": "backprops", + "typeAttr": "T" + } + ], + "summary": "Computes rectified linear gradients for a LeakyRelu operation." + } + }, + { + "name": "LearnedUnigramCandidateSampler", + "schema": { + "attributes": [ + { + "description": "Number of true labels per context.", + "minimum": 1, + "name": "num_true", + "type": "int64" + }, + { + "description": "Number of candidates to randomly sample.", + "minimum": 1, + "name": "num_sampled", + "type": "int64" + }, + { + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. 
This requires some approximation to\nestimate the post-rejection sampling probabilities.", + "name": "unique", + "type": "boolean" + }, + { + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1, + "name": "range_max", + "type": "int64" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "inputs": [ + { + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "name": "true_classes", + "type": 9 + } + ], + "outputs": [ + { + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "name": "sampled_candidates", + "type": 9 + }, + { + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "name": "true_expected_count", + "type": 1 + }, + { + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. 
If unique=true, then this is a\nprobability.", + "name": "sampled_expected_count", + "type": 1 + } + ], + "summary": "Generates labels for candidate sampling with a learned unigram distribution." + } + }, + { + "name": "LeftShift", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "If `y` is negative, or greater than or equal to the width of `x` in bits the\nresult is implementation defined.\n\nExample:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\nimport numpy as np\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n\n left_shift_result = bitwise_ops.left_shift(lhs, rhs)\n\n print(left_shift_result)\n\n# This will print:\n# tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8)\n# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16)\n# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32)\n# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64)\n\nlhs = np.array([-2, 64, 101, 32], dtype=np.int8)\nrhs = np.array([-1, -5, -3, -14], dtype=np.int8)\nbitwise_ops.left_shift(lhs, rhs)\n# \n```\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Elementwise computes the bitwise left-shift of `x` and `y`." 
+ } + }, + { + "name": "LegacyParallelInterleaveDatasetV2", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "default": "default", + "name": "deterministic", + "type": "string" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from a variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! This dataset is not deterministic!", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "name": "cycle_length", + "type": 9 + }, + { + "name": "block_length", + "type": 9 + }, + { + "name": "buffer_output_elements", + "type": 9 + }, + { + "name": "prefetch_input_elements", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "Less", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Less` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6])\ny = tf.constant([5])\ntf.math.less(x, y) ==> [False, True, False]\n\nx = tf.constant([5, 4, 6])\ny = tf.constant([5, 6, 7])\ntf.math.less(x, y) ==> [False, True, True]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of (x < y) element-wise." + } + }, + { + "name": "LessEqual", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `LessEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\nExample:\n\n```python\nx = tf.constant([5, 4, 6])\ny = tf.constant([5])\ntf.math.less_equal(x, y) ==> [True, True, False]\n\nx = tf.constant([5, 4, 6])\ny = tf.constant([5, 6, 6])\ntf.math.less_equal(x, y) ==> [True, True, True]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of (x <= y) element-wise." + } + }, + { + "name": "Lgamma", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": " For positive numbers, this function computes log((input - 1)!) for every element in the tensor.\n `lgamma(5) = log((5-1)!) = log(4!) 
= log(24) = 3.1780539`\n\nExample:\n\n```python\nx = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])\ntf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the log of the absolute value of `Gamma(x)` element-wise." + } + }, + { + "name": "LinSpace", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "A sequence of `num` evenly-spaced values are generated beginning at `start`.\nIf `num > 1`, the values in the sequence increase by `stop - start / num - 1`,\nso that the last one is exactly `stop`.\n\nFor example:\n\n```\ntf.linspace(10.0, 12.0, 3, name=\"linspace\") => [ 10.0 11.0 12.0]\n```", + "inputs": [ + { + "description": "0-D tensor. First entry in the range.", + "name": "start", + "typeAttr": "T" + }, + { + "description": "0-D tensor. Last entry in the range.", + "name": "stop", + "typeAttr": "T" + }, + { + "description": "0-D tensor. Number of values to generate.", + "name": "num", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "1-D. The generated values.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Generates values in an interval." + } + }, + { + "name": "ListDiff", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_idx", + "type": "type" + } + ], + "description": "Given a list `x` and a list `y`, this operation returns a list `out` that\nrepresents all values that are in `x` but not in `y`. 
The returned list `out`\nis sorted in the same order that the numbers appear in `x` (duplicates are\npreserved). This operation also returns a list `idx` that represents the\nposition of each `out` element in `x`. In other words:\n\n`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`\n\nFor example, given this input:\n\n```\nx = [1, 2, 3, 4, 5, 6]\ny = [1, 3, 5]\n```\n\nThis operation would return:\n\n```\nout ==> [2, 4, 6]\nidx ==> [1, 3, 5]\n```", + "inputs": [ + { + "description": "1-D. Values to keep.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "1-D. Values to remove.", + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D. Values present in `x` but not in `y`.", + "name": "out", + "typeAttr": "T" + }, + { + "description": "1-D. Positions of `x` values preserved in `out`.", + "name": "idx", + "typeAttr": "out_idx" + } + ], + "summary": "Computes the difference between two lists of numbers or strings." + } + }, + { + "name": "LoadAndRemapMatrix", + "schema": { + "attributes": [ + { + "description": "Number of rows (length of the 1st dimension) in the output matrix.", + "minimum": 0, + "name": "num_rows", + "type": "int64" + }, + { + "description": "Number of columns (length of the 2nd dimension) in the output matrix.", + "minimum": 1, + "name": "num_cols", + "type": "int64" + }, + { + "default": -1, + "description": "The maximum number of rows to load from the checkpoint at\nonce. If less than or equal to 0, the entire matrix will be loaded into\nmemory. 
Setting this arg trades increased disk reads for lower memory usage.", + "name": "max_rows_in_memory", + "type": "int64" + } + ], + "description": "at `ckpt_path` and potentially reorders its rows and columns using the\nspecified remappings.\n\nMost users should use one of the wrapper initializers (such as\n`tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this\nfunction directly.\n\nThe remappings are 1-D tensors with the following properties:\n\n* `row_remapping` must have exactly `num_rows` entries. Row `i` of the output\n matrix will be initialized from the row corresponding to index\n `row_remapping[i]` in the old `Tensor` from the checkpoint.\n* `col_remapping` must have either 0 entries (indicating that no column\n reordering is needed) or `num_cols` entries. If specified, column `j` of the\n output matrix will be initialized from the column corresponding to index\n `col_remapping[j]` in the old `Tensor` from the checkpoint.\n* A value of -1 in either of the remappings signifies a \"missing\" entry. In that\n case, values from the `initializing_values` tensor will be used to fill that\n missing row or column. 
If `row_remapping` has `r` missing entries and\n `col_remapping` has `c` missing entries, then the following condition must be\n true:\n\n`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`\n\nThe remapping tensors can be generated using the GenerateVocabRemapping op.\n\nAs an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],\ninitializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing\nthe value from row i, column j of the old tensor in the checkpoint, the output\nmatrix will look like the following:\n\n[[w(1, 0), w(1, 2), 0.5],\n [w(0, 0), w(0, 2), -0.5],\n [0.25, -0.25, 42]]", + "inputs": [ + { + "description": "Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from\nwhich the old matrix `Tensor` will be loaded.", + "name": "ckpt_path", + "type": 7 + }, + { + "description": "Name of the 2-D `Tensor` to load from checkpoint.", + "name": "old_tensor_name", + "type": 7 + }, + { + "description": "An int `Tensor` of row remappings (generally created by\n`generate_vocab_remapping`). Even if no row remapping is needed, this must\nstill be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted\nindex-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).", + "name": "row_remapping", + "type": 9 + }, + { + "description": "An int `Tensor` of column remappings (generally created by\n`generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping\nis to be done (e.g. column ordering is the same).", + "name": "col_remapping", + "type": 9 + }, + { + "description": "A float `Tensor` containing values to fill in for cells\nin the output matrix that are not loaded from the checkpoint. 
Length must be\nexactly the same as the number of missing / new cells.", + "name": "initializing_values", + "type": 1 + } + ], + "outputs": [ + { + "description": "Output matrix containing existing values loaded from the\ncheckpoint, and with any missing values filled in from initializing_values.", + "name": "output_matrix", + "type": 1 + } + ], + "summary": "Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint" + } + }, + { + "name": "LoadTPUEmbeddingADAMParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the ADAM optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of momenta used in the ADAM optimization algorithm.", + "name": "momenta", + "type": 1 + }, + { + "description": "Value of velocities used in the ADAM optimization algorithm.", + "name": "velocities", + "type": 1 + } + ], + "summary": "Load ADAM embedding parameters." 
+ } + }, + { + "name": "LoadTPUEmbeddingADAMParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the ADAM optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of momenta used in the ADAM optimization algorithm.", + "name": "momenta", + "type": 1 + }, + { + "description": "Value of velocities used in the ADAM optimization algorithm.", + "name": "velocities", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the ADAM optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load ADAM embedding parameters with debug support." + } + }, + { + "name": "LoadTPUEmbeddingAdadeltaParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the Adadelta optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the Adadelta optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Value of updates used in the Adadelta optimization algorithm.", + "name": "updates", + "type": 1 + } + ], + "summary": "Load Adadelta embedding parameters." + } + }, + { + "name": "LoadTPUEmbeddingAdadeltaParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the Adadelta optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the Adadelta optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Value of updates used in the Adadelta optimization algorithm.", + "name": "updates", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the Adadelta optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load Adadelta parameters with debug support." 
+ } + }, + { + "name": "LoadTPUEmbeddingAdagradParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + } + ], + "summary": "Load Adagrad embedding parameters." + } + }, + { + "name": "LoadTPUEmbeddingAdagradParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the Adagrad optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load Adagrad embedding parameters with debug support." + } + }, + { + "name": "LoadTPUEmbeddingCenteredRMSPropParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the centered RMSProp optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of ms used in the centered RMSProp optimization algorithm.", + "name": "ms", + "type": 1 + }, + { + "description": "Value of mom used in the centered RMSProp optimization algorithm.", + "name": "mom", + "type": 1 + }, + { + "description": "Value of mg used in the centered RMSProp optimization algorithm.", + "name": "mg", + "type": 1 + } + ], + "summary": "Load centered RMSProp embedding parameters." 
+ } + }, + { + "name": "LoadTPUEmbeddingFTRLParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the FTRL optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the FTRL optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Value of linears used in the FTRL optimization algorithm.", + "name": "linears", + "type": 1 + } + ], + "summary": "Load FTRL embedding parameters." + } + }, + { + "name": "LoadTPUEmbeddingFTRLParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the FTRL optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the FTRL optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Value of linears used in the FTRL optimization algorithm.", + "name": "linears", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the FTRL optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load FTRL embedding parameters with debug support." + } + }, + { + "name": "LoadTPUEmbeddingMDLAdagradLightParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the MDL Adagrad Light optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the MDL Adagrad Light optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Value of weights used in the MDL Adagrad Light optimization algorithm.", + "name": "weights", + "type": 1 + }, + { + "description": "Value of benefits used in the MDL Adagrad Light optimization algorithm.", + "name": "benefits", + "type": 1 + } + ], + "summary": "Load MDL Adagrad Light embedding parameters." + } + }, + { + "name": "LoadTPUEmbeddingMomentumParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the Momentum optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of momenta used in the Momentum optimization algorithm.", + "name": "momenta", + "type": 1 + } + ], + "summary": "Load Momentum embedding parameters." 
+ } + }, + { + "name": "LoadTPUEmbeddingMomentumParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the Momentum optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of momenta used in the Momentum optimization algorithm.", + "name": "momenta", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the Momentum optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load Momentum embedding parameters with debug support." + } + }, + { + "name": "LoadTPUEmbeddingProximalAdagradParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the proximal Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the proximal Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + } + ], + "summary": "Load proximal Adagrad embedding parameters." + } + }, + { + "name": "LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the proximal Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of accumulators used in the proximal Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the proximal Adagrad optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load proximal Adagrad embedding parameters with debug support." 
+ } + }, + { + "name": "LoadTPUEmbeddingProximalYogiParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "inputs": [ + { + "name": "parameters", + "type": 1 + }, + { + "name": "v", + "type": 1 + }, + { + "name": "m", + "type": 1 + } + ] + } + }, + { + "name": "LoadTPUEmbeddingProximalYogiParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "inputs": [ + { + "name": "parameters", + "type": 1 + }, + { + "name": "v", + "type": 1 + }, + { + "name": "m", + "type": 1 + }, + { + "name": "gradient_accumulators", + "type": 1 + } + ] + } + }, + { + "name": "LoadTPUEmbeddingRMSPropParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the RMSProp optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of ms used in the RMSProp optimization algorithm.", + "name": "ms", + "type": 1 + }, + { + "description": "Value of mom used in the RMSProp optimization algorithm.", + "name": "mom", + "type": 1 + } + ], + "summary": "Load RMSProp embedding parameters." + } + }, + { + "name": "LoadTPUEmbeddingRMSPropParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the RMSProp optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of ms used in the RMSProp optimization algorithm.", + "name": "ms", + "type": 1 + }, + { + "description": "Value of mom used in the RMSProp optimization algorithm.", + "name": "mom", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the RMSProp optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load RMSProp embedding parameters with debug support." 
+ } + }, + { + "name": "LoadTPUEmbeddingStochasticGradientDescentParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the stochastic gradient descent optimization algorithm.", + "name": "parameters", + "type": 1 + } + ], + "summary": "Load SGD embedding parameters." + } + }, + { + "name": "LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that loads optimization parameters into HBM for embedding. Must be\npreceded by a ConfigureTPUEmbeddingHost op that sets up the correct\nembedding table configuration. 
For example, this op is used to install\nparameters that are loaded from a checkpoint before a training loop is\nexecuted.", + "inputs": [ + { + "description": "Value of parameters used in the stochastic gradient descent optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Value of gradient_accumulators used in the Adadelta optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Load SGD embedding parameters." + } + }, + { + "name": "Log", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = \\log_e x\\\\).\n\nExample:\n\n```python\nx = tf.constant([0, 0.5, 1, 5])\ntf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes natural logarithm of x element-wise." + } + }, + { + "name": "Log1p", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = \\log_e (1 + x)\\\\).\n\nExample:\n\n```python\nx = tf.constant([0, 0.5, 1, 5])\ntf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes natural logarithm of (1 + x) element-wise." 
+ } + }, + { + "name": "LogMatrixDeterminant", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "one or more square matrices.\n\nThe input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions\nform square matrices. The outputs are two tensors containing the signs and\nabsolute values of the log determinants for all N input submatrices\n`[..., :, :]` such that the determinant = sign*exp(log_abs_determinant).\nThe log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU\nis the LU decomposition of the input and P is the corresponding\npermutation matrix.", + "inputs": [ + { + "description": "Shape is `[N, M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The signs of the log determinants of the inputs. Shape is `[N]`.", + "name": "sign", + "typeAttr": "T" + }, + { + "description": "The logs of the absolute values of the determinants\nof the N input matrices. Shape is `[N]`.", + "name": "log_abs_determinant", + "typeAttr": "T" + } + ], + "summary": "Computes the sign and the log of the absolute value of the determinant of" + } + }, + { + "name": "LogSoftmax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "For each batch `i` and class `j` we have\n\n logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))", + "inputs": [ + { + "description": "2-D with shape `[batch_size, num_classes]`.", + "name": "logits", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same shape as `logits`.", + "name": "logsoftmax", + "typeAttr": "T" + } + ], + "summary": "Computes log softmax activations." 
+ } + }, + { + "name": "LogUniformCandidateSampler", + "schema": { + "attributes": [ + { + "description": "Number of true labels per context.", + "minimum": 1, + "name": "num_true", + "type": "int64" + }, + { + "description": "Number of candidates to randomly sample.", + "minimum": 1, + "name": "num_sampled", + "type": "int64" + }, + { + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities.", + "name": "unique", + "type": "boolean" + }, + { + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1, + "name": "range_max", + "type": "int64" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. 
The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "inputs": [ + { + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "name": "true_classes", + "type": 9 + } + ], + "outputs": [ + { + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "name": "sampled_candidates", + "type": 9 + }, + { + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "name": "true_expected_count", + "type": 1 + }, + { + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "name": "sampled_expected_count", + "type": 1 + } + ], + "summary": "Generates labels for candidate sampling with a log-uniform distribution." + } + }, + { + "name": "LogicalAnd", + "schema": { + "description": "*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "type": 10 + }, + { + "name": "y", + "type": 10 + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of x AND y element-wise." + } + }, + { + "name": "LogicalNot", + "schema": { + "inputs": [ + { + "description": "A `Tensor` of type `bool`.", + "name": "x", + "type": 10 + } + ], + "outputs": [ + { + "description": "A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.", + "name": "y", + "type": 10 + } + ], + "summary": "Returns the truth value of `NOT x` element-wise." 
+ } + }, + { + "name": "LogicalOr", + "schema": { + "description": "*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "type": 10 + }, + { + "name": "y", + "type": 10 + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of x OR y element-wise." + } + }, + { + "name": "LookupTableExport", + "schema": { + "attributes": [ + { + "name": "Tkeys", + "type": "type" + }, + { + "name": "Tvalues", + "type": "type" + } + ], + "inputs": [ + { + "description": "Handle to the table.", + "isRef": true, + "name": "table_handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "Vector of all keys present in the table.", + "name": "keys", + "typeAttr": "Tkeys" + }, + { + "description": "Tensor of all values in the table. Indexed in parallel with `keys`.", + "name": "values", + "typeAttr": "Tvalues" + } + ], + "summary": "Outputs all keys and values in the table." + } + }, + { + "name": "LookupTableExportV2", + "schema": { + "attributes": [ + { + "name": "Tkeys", + "type": "type" + }, + { + "name": "Tvalues", + "type": "type" + } + ], + "inputs": [ + { + "description": "Handle to the table.", + "name": "table_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "Vector of all keys present in the table.", + "name": "keys", + "typeAttr": "Tkeys" + }, + { + "description": "Tensor of all values in the table. Indexed in parallel with `keys`.", + "name": "values", + "typeAttr": "Tvalues" + } + ], + "summary": "Outputs all keys and values in the table." 
+ } + }, + { + "name": "LookupTableFind", + "schema": { + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "description": "The tensor `keys` must of the same type as the keys of the table.\nThe output `values` is of the type of the table values.\n\nThe scalar `default_value` is the value output for keys not present in the\ntable. It must also be of the same type as the table values.", + "inputs": [ + { + "description": "Handle to the table.", + "isRef": true, + "name": "table_handle", + "type": 7 + }, + { + "description": "Any shape. Keys to look up.", + "name": "keys", + "typeAttr": "Tin" + }, + { + "name": "default_value", + "typeAttr": "Tout" + } + ], + "outputs": [ + { + "description": "Same shape as `keys`. Values found in the table, or `default_values`\nfor missing keys.", + "name": "values", + "typeAttr": "Tout" + } + ], + "summary": "Looks up keys in a table, outputs the corresponding values." + } + }, + { + "name": "LookupTableFindV2", + "schema": { + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "description": "The tensor `keys` must of the same type as the keys of the table.\nThe output `values` is of the type of the table values.\n\nThe scalar `default_value` is the value output for keys not present in the\ntable. It must also be of the same type as the table values.", + "inputs": [ + { + "description": "Handle to the table.", + "name": "table_handle", + "type": 20 + }, + { + "description": "Any shape. Keys to look up.", + "name": "keys", + "typeAttr": "Tin" + }, + { + "name": "default_value", + "typeAttr": "Tout" + } + ], + "outputs": [ + { + "description": "Same shape as `keys`. Values found in the table, or `default_values`\nfor missing keys.", + "name": "values", + "typeAttr": "Tout" + } + ], + "summary": "Looks up keys in a table, outputs the corresponding values." 
+ } + }, + { + "name": "LookupTableImport", + "schema": { + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "inputs": [ + { + "description": "Handle to the table.", + "isRef": true, + "name": "table_handle", + "type": 7 + }, + { + "description": "Any shape. Keys to look up.", + "name": "keys", + "typeAttr": "Tin" + }, + { + "description": "Values to associate with keys.", + "name": "values", + "typeAttr": "Tout" + } + ], + "summary": "Replaces the contents of the table with the specified keys and values." + } + }, + { + "name": "LookupTableImportV2", + "schema": { + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "inputs": [ + { + "description": "Handle to the table.", + "name": "table_handle", + "type": 20 + }, + { + "description": "Any shape. Keys to look up.", + "name": "keys", + "typeAttr": "Tin" + }, + { + "description": "Values to associate with keys.", + "name": "values", + "typeAttr": "Tout" + } + ], + "summary": "Replaces the contents of the table with the specified keys and values." + } + }, + { + "name": "LookupTableInsert", + "schema": { + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "inputs": [ + { + "description": "Handle to the table.", + "isRef": true, + "name": "table_handle", + "type": 7 + }, + { + "description": "Any shape. 
Keys to look up.", + "name": "keys", + "typeAttr": "Tin" + }, + { + "description": "Values to associate with keys.", + "name": "values", + "typeAttr": "Tout" + } + ], + "summary": "Updates the table to associates keys with values." + } + }, + { + "name": "LookupTableInsertV2", + "schema": { + "attributes": [ + { + "name": "Tin", + "type": "type" + }, + { + "name": "Tout", + "type": "type" + } + ], + "description": "The tensor `keys` must be of the same type as the keys of the table.\nThe tensor `values` must be of the type of the table values.", + "inputs": [ + { + "description": "Handle to the table.", + "name": "table_handle", + "type": 20 + }, + { + "description": "Any shape. Keys to look up.", + "name": "keys", + "typeAttr": "Tin" + }, + { + "description": "Values to associate with keys.", + "name": "values", + "typeAttr": "Tout" + } + ], + "summary": "Updates the table to associates keys with values." + } + }, + { + "name": "LookupTableRemoveV2", + "schema": { + "attributes": [ + { + "name": "Tin", + "type": "type" + } + ], + "description": "The tensor `keys` must of the same type as the keys of the table. Keys not\nalready in the table are silently ignored.", + "inputs": [ + { + "description": "Handle to the table.", + "name": "table_handle", + "type": 20 + }, + { + "description": "Any shape. Keys of the elements to remove.", + "name": "keys", + "typeAttr": "Tin" + } + ], + "summary": "Removes keys and its associated values from a table." + } + }, + { + "name": "LookupTableSize", + "schema": { + "inputs": [ + { + "description": "Handle to the table.", + "isRef": true, + "name": "table_handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "Scalar that contains number of elements in the table.", + "name": "size", + "type": 9 + } + ], + "summary": "Computes the number of elements in the given table." 
+ } + }, + { + "name": "LookupTableSizeV2", + "schema": { + "inputs": [ + { + "description": "Handle to the table.", + "name": "table_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "Scalar that contains number of elements in the table.", + "name": "size", + "type": 9 + } + ], + "summary": "Computes the number of elements in the given table." + } + }, + { + "name": "LoopCond", + "schema": { + "description": "This operator represents the loop termination condition used by the\n\"pivot\" switches of a loop.", + "inputs": [ + { + "description": "A boolean scalar, representing the branch predicate of the Switch op.", + "name": "input", + "type": 10 + } + ], + "outputs": [ + { + "description": "The same tensor as `input`.", + "name": "output", + "type": 10 + } + ], + "summary": "Forwards the input to the output." + } + }, + { + "name": "LowerBound", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_type", + "type": "type" + } + ], + "description": "Each set of rows with the same index in (sorted_inputs, values) is treated\nindependently. The resulting row is the equivalent of calling\n`np.searchsorted(sorted_inputs, values, side='left')`.\n\nThe result is not a global index to the entire\n`Tensor`, but rather just the index in the last dimension.\n\nA 2-D example:\n sorted_sequence = [[0, 3, 9, 9, 10],\n [1, 2, 3, 4, 5]]\n values = [[2, 4, 9],\n [0, 2, 6]]\n\n result = LowerBound(sorted_sequence, values)\n\n result == [[1, 2, 2],\n [0, 1, 5]]", + "inputs": [ + { + "description": "2-D Tensor where each row is ordered.", + "name": "sorted_inputs", + "typeAttr": "T" + }, + { + "description": "2-D Tensor with the same numbers of rows as `sorted_search_values`. 
Contains\nthe values that will be searched for in `sorted_search_values`.", + "name": "values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A `Tensor` with the same shape as `values`. It contains the first scalar index\ninto the last dimension where values can be inserted without changing the\nordered property.", + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Applies lower_bound(sorted_search_values, values) along each row." + } + }, + { + "name": "Lu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "output_idx_type", + "type": "type" + } + ], + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices.\n\nThe input has to be invertible.\n\nThe output consists of two tensors LU and P containing the LU decomposition\nof all input submatrices `[..., :, :]`. LU encodes the lower triangular and\nupper triangular factors.\n\nFor each input submatrix of shape `[M, M]`, L is a lower triangular matrix of\nshape `[M, M]` with unit diagonal whose entries correspond to the strictly lower\ntriangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose\nentries correspond to the upper triangular part, including the diagonal, of LU.\n\nP represents a permutation matrix encoded as a list of indices each between `0`\nand `M-1`, inclusive. 
If P_mat denotes the permutation matrix corresponding to\nP, then the L, U and P satisfies P_mat * input = L * U.", + "inputs": [ + { + "description": "A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of\nsize `[M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the\nlower triangular factor `L` with unit diagonal, and whose upper triangular part\ndenotes the upper triangular factor `U`.", + "name": "lu", + "typeAttr": "T" + }, + { + "description": "Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is\n`[..., M]`.\n@compatibility(scipy)\nSimilar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are\npacked into a single tensor, the permutation is applied to `input` instead of\nthe right hand side and the permutation `P` is returned as a list of indices\ninstead of a permutation matrix.\n@end_compatibility", + "name": "p", + "typeAttr": "output_idx_type" + } + ], + "summary": "Computes the LU decomposition of one or more square matrices." + } + }, + { + "name": "MakeIterator", + "schema": { + "description": "This operation may be executed multiple times. Each execution will reset the\niterator in `iterator` to the first element of `dataset`.", + "inputs": [ + { + "name": "dataset", + "type": 21 + }, + { + "name": "iterator", + "type": 20 + } + ], + "summary": "Makes a new iterator from the given `dataset` and stores it in `iterator`." 
+ } + }, + { + "name": "MapAndBatchDataset", + "schema": { + "attributes": [ + { + "description": "A function to apply to the outputs of `input_dataset`.", + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + } + ], + "description": "Creates a dataset that applies `f` to the outputs of `input_dataset` and then\nbatches `batch_size` of them.\n\nUnlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `batch_size * num_parallel_batches` copies of `f` in parallel.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A list of tensors, typically values that were captured when building a closure\nfor `f`.", + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "A scalar representing the number of elements to accumulate in a\nbatch. It determines the number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "name": "batch_size", + "type": 9 + }, + { + "description": "A scalar representing the maximum number of parallel invocations of the `map_fn`\nfunction. Applying the `map_fn` on consecutive input elements in parallel has\nthe potential to improve input pipeline throughput.", + "name": "num_parallel_calls", + "type": 9 + }, + { + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "name": "drop_remainder", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that fuses mapping with batching." 
+ } + }, + { + "name": "MapClear", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "summary": "Op removes all elements in the underlying container." + } + }, + { + "name": "MapDataset", + "schema": { + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "use_inter_op_parallelism", + "type": "boolean" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." 
+ } + }, + { + "name": "MapDefun", + "schema": { + "attributes": [ + { + "description": "A list of types.", + "minimum": 1, + "name": "Targuments", + "type": "type[]" + }, + { + "default": [], + "description": "A list of types.", + "minimum": 0, + "name": "Tcaptured", + "type": "type[]" + }, + { + "description": "A list of types.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "description": "A list of shapes.", + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "name": "f", + "type": "function" + }, + { + "default": 1, + "name": "max_intra_op_parallelism", + "type": "int64" + } + ], + "inputs": [ + { + "description": " A list of tensors whose types are `Targuments`, corresponding to the inputs\n the function should be mapped over.", + "name": "arguments", + "typeListAttr": "Targuments" + }, + { + "description": " A list of tensors whose types are `Tcaptured`, corresponding to the captured\n inputs of the defun.", + "name": "captured_inputs", + "typeListAttr": "Tcaptured" + } + ], + "outputs": [ + { + "description": " A list of output tensors whose types are `output_types` and whose dimensions\n 0 are the same as the dimensions 0 of the tensors in `arguments`, and whose\n remaining dimensions correspond to those in `output_shapes`.", + "name": "output", + "typeListAttr": "output_types" + } + ], + "summary": " Maps a function on the list of tensors unpacked from arguments on dimension 0.\n The function given by `f` is assumed to be stateless, and is executed\n concurrently on all the slices; up to batch_size (i.e. the size of the 0th\n dimension of each argument) functions will be scheduled at once.\n\n The `max_intra_op_parallelism` attr, which defaults to 1, can be used to\n limit the intra op parallelism. 
To limit inter-op parallelism, a user can\n set a private threadpool on the dataset using `tf.data.Options`'s\n `ThreadingOptions`.\n\n Note that this op is not exposed to users directly, but is invoked in tf.data\n rewrites." + } + }, + { + "name": "MapIncompleteSize", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ], + "summary": "Op returns the number of incomplete elements in the underlying container." + } + }, + { + "name": "MapPeek", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "underlying container does not contain this key\nthis op will block until it does.", + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op peeks at the values at the specified key. 
If the" + } + }, + { + "name": "MapSize", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ], + "summary": "Op returns the number of elements in the underlying container." + } + }, + { + "name": "MapStage", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached.", + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "minimum": 1, + "name": "fake_dtypes", + "type": "type[]" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "It is necessary to match this name to the matching Unstage Op.", + "name": "shared_name", + "type": "string" + } + ], + "inputs": [ + { + "description": "int64", + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + }, + { + "description": "a list of tensors\ndtypes A list of data types that inserted values should adhere to.", + "name": "values", + "typeListAttr": "fake_dtypes" + } + ], + "summary": "Stage (key, values) in the underlying container which behaves like a hashtable." 
+ } + }, + { + "name": "MapUnstage", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "from the underlying container. If the underlying container\ndoes not contain this key, the op will block until it does.", + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op removes and returns the values associated with the key" + } + }, + { + "name": "MapUnstageNoKey", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "from the underlying container. 
If the underlying container\ndoes not contain elements, the op will block until it does.", + "inputs": [ + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op removes and returns a random (key, value)" + } + }, + { + "name": "MatMul", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, \"a\" is transposed before multiplication.", + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "description": "If true, \"b\" is transposed before multiplication.", + "name": "transpose_b", + "type": "boolean" + }, + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "The inputs must be two-dimensional matrices and the inner dimension of\n\"a\" (after being transposed if transpose_a is true) must match the\nouter dimension of \"b\" (after being transposed if transposed_b is\ntrue).\n\n*Note*: The default kernel implementation for MatMul on GPUs uses\ncublas.", + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "product", + "typeAttr": "T" + } + ], + "summary": "Multiply the matrix \"a\" by the matrix \"b\"." + } + }, + { + "name": "MatchingFiles", + "schema": { + "description": "Note that this routine only supports wildcard characters in the\nbasename portion of the pattern, not in the directory portion.\nNote also that the order of filenames returned is deterministic.", + "inputs": [ + { + "description": "Shell wildcard pattern(s). 
Scalar or vector of type string.", + "name": "pattern", + "type": 7 + } + ], + "outputs": [ + { + "description": "A vector of matching filenames.", + "name": "filenames", + "type": 7 + } + ], + "summary": "Returns the set of files matching one or more glob patterns." + } + }, + { + "name": "MatchingFilesDataset", + "schema": { + "inputs": [ + { + "name": "patterns", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "MatrixBandPart", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindex", + "type": "type" + } + ], + "description": "The `band` part is computed as follows:\nAssume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\ntensor with the same shape where\n\n`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.\n\nThe indicator function\n\n`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) &&\n (num_upper < 0 || (n-m) <= num_upper)`.\n\nFor example:\n\n```\n# if 'input' is [[ 0, 1, 2, 3]\n [-1, 0, 1, 2]\n [-2, -1, 0, 1]\n [-3, -2, -1, 0]],\n\ntf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]\n [-1, 0, 1, 2]\n [ 0, -1, 0, 1]\n [ 0, 0, -1, 0]],\n\ntf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]\n [-1, 0, 1, 0]\n [-2, -1, 0, 1]\n [ 0, -2, -1, 0]]\n```\n\nUseful special cases:\n\n```\n tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.\n tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.\n tf.matrix_band_part(input, 0, 0) ==> Diagonal.\n```", + "inputs": [ + { + "description": "Rank `k` tensor.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "0-D tensor. Number of subdiagonals to keep. If negative, keep entire\nlower triangle.", + "name": "num_lower", + "typeAttr": "Tindex" + }, + { + "description": "0-D tensor. Number of superdiagonals to keep. 
If negative, keep\nentire upper triangle.", + "name": "num_upper", + "typeAttr": "Tindex" + } + ], + "outputs": [ + { + "description": "Rank `k` tensor of the same shape as input. The extracted banded tensor.", + "name": "band", + "typeAttr": "T" + } + ], + "summary": "Copy a tensor setting everything outside a central band in each innermost matrix to zero." + } + }, + { + "name": "MatrixDeterminant", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor containing the determinants\nfor all input submatrices `[..., :, :]`.", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[...]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the determinant of one or more square matrices." + } + }, + { + "name": "MatrixDiag", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Given a `diagonal`, this operation returns a tensor with the `diagonal` and\neverything else padded with zeros. 
The diagonal is computed as follows:\n\nAssume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a\ntensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where:\n\n`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.\n\nFor example:\n\n```\n# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]\n\nand diagonal.shape = (2, 4)\n\ntf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]],\n [[5, 0, 0, 0]\n [0, 6, 0, 0]\n [0, 0, 7, 0]\n [0, 0, 0, 8]]]\n\nwhich has shape (2, 4, 4)\n```", + "inputs": [ + { + "description": "Rank `k`, where `k >= 1`.", + "name": "diagonal", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a batched diagonal tensor with a given batched diagonal values." + } + }, + { + "name": "MatrixDiagPart", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "This operation returns a tensor with the `diagonal` part\nof the batched `input`. 
The `diagonal` part is computed as follows:\n\nAssume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a\ntensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:\n\n`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.\n\nThe input must be at least a matrix.\n\nFor example:\n\n```\n# 'input' is [[[1, 0, 0, 0]\n [0, 2, 0, 0]\n [0, 0, 3, 0]\n [0, 0, 0, 4]],\n [[5, 0, 0, 0]\n [0, 6, 0, 0]\n [0, 0, 7, 0]\n [0, 0, 0, 8]]]\n\nand input.shape = (2, 4, 4)\n\ntf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]\n\nwhich has shape (2, 4)\n```", + "inputs": [ + { + "description": "Rank `k` tensor where `k >= 2`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The extracted diagonal(s) having shape\n`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.", + "name": "diagonal", + "typeAttr": "T" + } + ], + "summary": "Returns the batched diagonal part of a batched tensor." + } + }, + { + "name": "MatrixDiagPartV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched\n`input`.\n\nAssume `input` has `r` dimensions `[I, J, ..., L, M, N]`.\nLet `max_diag_len` be the maximum length among all diagonals to be extracted,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\nLet `num_diags` be the number of diagonals to extract,\n`num_diags = k[1] - k[0] + 1`.\n\nIf `num_diags == 1`, the output tensor is of rank `r - 1` with shape\n`[I, J, ..., L, max_diag_len]` and values:\n\n```\ndiagonal[i, j, ..., l, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n```\nwhere `y = max(-k[1], 0)`, `x = max(k[1], 0)`.\n\nOtherwise, the output tensor has rank `r` with dimensions\n`[I, J, ..., L, num_diags, max_diag_len]` with values:\n\n```\ndiagonal[i, j, ..., l, m, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < 
N,\n padding_value ; otherwise.\n```\nwhere `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.\n\nThe input must be at least a matrix.\n\nFor example:\n\n```\ninput = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)\n [5, 6, 7, 8],\n [9, 8, 7, 6]],\n [[5, 4, 3, 2],\n [1, 2, 3, 4],\n [5, 6, 7, 8]]])\n\n# A main diagonal from each batch.\ntf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)\n [5, 2, 7]]\n\n# A superdiagonal from each batch.\ntf.matrix_diag_part(input, k = 1)\n ==> [[2, 7, 6], # Output shape: (2, 3)\n [4, 3, 8]]\n\n# A tridiagonal band from each batch.\ntf.matrix_diag_part(input, k = (-1, 1))\n ==> [[[2, 7, 6], # Output shape: (2, 3, 3)\n [1, 6, 7],\n [5, 8, 0]],\n [[4, 3, 8],\n [5, 2, 7],\n [1, 6, 0]]]\n\n# Padding value = 9\ntf.matrix_diag_part(input, k = (1, 3), padding_value = 9)\n ==> [[[4, 9, 9], # Output shape: (2, 3, 3)\n [3, 8, 9],\n [2, 7, 6]],\n [[2, 9, 9],\n [3, 4, 9],\n [4, 3, 8]]]\n```", + "inputs": [ + { + "description": "Rank `r` tensor where `r >= 2`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "name": "k", + "type": 3 + }, + { + "description": "The value to fill the area outside the specified diagonal band with.\nDefault is 0.", + "name": "padding_value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The extracted diagonal(s).", + "name": "diagonal", + "typeAttr": "T" + } + ], + "summary": "Returns the batched diagonal part of a batched tensor." + } + }, + { + "name": "MatrixDiagPartV3", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "RIGHT_LEFT", + "description": "Some diagonals are shorter than `max_diag_len` and need to be padded. 
`align` is\na string specifying how superdiagonals and subdiagonals should be aligned,\nrespectively. There are four possible alignments: \"RIGHT_LEFT\" (default),\n\"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\" aligns superdiagonals\nto the right (left-pads the row) and subdiagonals to the left (right-pads the\nrow). It is the packing format LAPACK uses. cuSPARSE uses \"LEFT_RIGHT\", which is\nthe opposite alignment. Must be one of the following: `LEFT_RIGHT`, `RIGHT_LEFT`, `LEFT_LEFT`, `RIGHT_RIGHT`.", + "name": "align", + "type": "string" + } + ], + "description": "Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched\n`input`.\n\nAssume `input` has `r` dimensions `[I, J, ..., L, M, N]`.\nLet `max_diag_len` be the maximum length among all diagonals to be extracted,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\nLet `num_diags` be the number of diagonals to extract,\n`num_diags = k[1] - k[0] + 1`.\n\nIf `num_diags == 1`, the output tensor is of rank `r - 1` with shape\n`[I, J, ..., L, max_diag_len]` and values:\n\n```\ndiagonal[i, j, ..., l, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n```\nwhere `y = max(-k[1], 0)`, `x = max(k[1], 0)`.\n\nOtherwise, the output tensor has rank `r` with dimensions\n`[I, J, ..., L, num_diags, max_diag_len]` with values:\n\n```\ndiagonal[i, j, ..., l, m, n]\n = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,\n padding_value ; otherwise.\n```\nwhere `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.\n\n`offset` is zero except when the alignment of the diagonal is to the right.\n```\noffset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n```\nwhere `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\nThe input must be at least a matrix.\n\nFor example:\n\n```\ninput 
= np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4)\n [5, 6, 7, 8],\n [9, 8, 7, 6]],\n [[5, 4, 3, 2],\n [1, 2, 3, 4],\n [5, 6, 7, 8]]])\n\n# A main diagonal from each batch.\ntf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3)\n [5, 2, 7]]\n\n# A superdiagonal from each batch.\ntf.matrix_diag_part(input, k = 1)\n ==> [[2, 7, 6], # Output shape: (2, 3)\n [4, 3, 8]]\n\n# A band from each batch.\ntf.matrix_diag_part(input, k = (-1, 2))\n ==> [[[0, 3, 8], # Output shape: (2, 4, 3)\n [2, 7, 6],\n [1, 6, 7],\n [5, 8, 0]],\n [[0, 3, 4],\n [4, 3, 8],\n [5, 2, 7],\n [1, 6, 0]]]\n\n# LEFT_RIGHT alignment.\ntf.matrix_diag_part(input, k = (-1, 2), align=\"LEFT_RIGHT\")\n ==> [[[3, 8, 0], # Output shape: (2, 4, 3)\n [2, 7, 6],\n [1, 6, 7],\n [0, 5, 8]],\n [[3, 4, 0],\n [4, 3, 8],\n [5, 2, 7],\n [0, 1, 6]]]\n\n# max_diag_len can be shorter than the main diagonal.\ntf.matrix_diag_part(input, k = (-2, -1))\n ==> [[[5, 8],\n [9, 0]],\n [[1, 6],\n [5, 0]]]\n\n# padding_value = 9\ntf.matrix_diag_part(input, k = (1, 3), padding_value = 9)\n ==> [[[9, 9, 4], # Output shape: (2, 3, 3)\n [9, 3, 8],\n [2, 7, 6]],\n [[9, 9, 2],\n [9, 3, 4],\n [4, 3, 8]]]\n\n```", + "inputs": [ + { + "description": "Rank `r` tensor where `r >= 2`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "name": "k", + "type": 3 + }, + { + "description": "The value to fill the area outside the specified diagonal band with.\nDefault is 0.", + "name": "padding_value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The extracted diagonal(s).", + "name": "diagonal", + "typeAttr": "T" + } + ], + "summary": "Returns the batched diagonal part of a batched tensor." 
+ } + }, + { + "name": "MatrixDiagV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th\ndiagonals of a matrix, with everything else padded with `padding`. `num_rows`\nand `num_cols` specify the dimension of the innermost matrix of the output. If\nboth are not specified, the op assumes the innermost matrix is square and infers\nits size from `k` and the innermost dimension of `diagonal`. If only one of them\nis specified, the op assumes the unspecified value is the smallest possible\nbased on other criteria.\n\nLet `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has\nrank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one\ndiagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank\n`r` with shape `[I, J, ..., L, num_rows, num_cols]`.\n\nThe second innermost dimension of `diagonal` has double meaning.\nWhen `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size\n[I, J, ..., M], and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper\n padding_value ; otherwise\n```\n\nOtherwise, `M` is treated as the number of diagonals for the matrix in the\nsame batch (`M = k[1]-k[0]+1`), and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n padding_value ; otherwise\n```\nwhere `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.\n\nFor example:\n\n```\n# The main diagonal.\ndiagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)\n [5, 6, 7, 8]])\ntf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]],\n [[5, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 0, 7, 0],\n [0, 0, 0, 8]]]\n\n# A superdiagonal (per batch).\ndiagonal = np.array([[1, 2, 3], # Input shape: 
(2, 3)\n [4, 5, 6]])\ntf.matrix_diag(diagonal, k = 1)\n ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)\n [0, 0, 2, 0],\n [0, 0, 0, 3],\n [0, 0, 0, 0]],\n [[0, 4, 0, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0]]]\n\n# A band of diagonals.\ndiagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3)\n [4, 5, 0]],\n [[6, 7, 9],\n [9, 1, 0]]])\ntf.matrix_diag(diagonals, k = (-1, 0))\n ==> [[[1, 0, 0], # Output shape: (2, 3, 3)\n [4, 2, 0],\n [0, 5, 3]],\n [[6, 0, 0],\n [9, 7, 0],\n [0, 1, 9]]]\n\n# Rectangular matrix.\ndiagonal = np.array([1, 2]) # Input shape: (2)\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)\n ==> [[0, 0, 0, 0], # Output shape: (3, 4)\n [1, 0, 0, 0],\n [0, 2, 0, 0]]\n\n# Rectangular matrix with inferred num_cols and padding_value = 9.\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)\n ==> [[9, 9], # Output shape: (3, 2)\n [1, 9],\n [9, 2]]\n```", + "inputs": [ + { + "description": "Rank `r`, where `r >= 1`", + "name": "diagonal", + "typeAttr": "T" + }, + { + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "name": "k", + "type": 3 + }, + { + "description": "The number of rows of the output matrix. If it is not provided, the op assumes\nthe output matrix is a square matrix and infers the matrix size from k and the\ninnermost dimension of `diagonal`.", + "name": "num_rows", + "type": 3 + }, + { + "description": "The number of columns of the output matrix. 
If it is not provided, the op\nassumes the output matrix is a square matrix and infers the matrix size from\nk and the innermost dimension of `diagonal`.", + "name": "num_cols", + "type": 3 + }, + { + "description": "The number to fill the area outside the specified diagonal band with.\nDefault is 0.", + "name": "padding_value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a batched diagonal tensor with given batched diagonal values." + } + }, + { + "name": "MatrixDiagV3", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "RIGHT_LEFT", + "description": "Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is\na string specifying how superdiagonals and subdiagonals should be aligned,\nrespectively. There are four possible alignments: \"RIGHT_LEFT\" (default),\n\"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\" aligns superdiagonals\nto the right (left-pads the row) and subdiagonals to the left (right-pads the\nrow). It is the packing format LAPACK uses. cuSPARSE uses \"LEFT_RIGHT\", which is\nthe opposite alignment. Must be one of the following: `LEFT_RIGHT`, `RIGHT_LEFT`, `LEFT_LEFT`, `RIGHT_RIGHT`.", + "name": "align", + "type": "string" + } + ], + "description": "Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th\ndiagonals of a matrix, with everything else padded with `padding`. `num_rows`\nand `num_cols` specify the dimension of the innermost matrix of the output. If\nboth are not specified, the op assumes the innermost matrix is square and infers\nits size from `k` and the innermost dimension of `diagonal`. If only one of them\nis specified, the op assumes the unspecified value is the smallest possible\nbased on other criteria.\n\nLet `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. 
The output tensor has\nrank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one\ndiagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank\n`r` with shape `[I, J, ..., L, num_rows, num_cols]`.\n\nThe second innermost dimension of `diagonal` has double meaning.\nWhen `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size\n[I, J, ..., M], and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper\n padding_value ; otherwise\n```\n\nOtherwise, `M` is treated as the number of diagonals for the matrix in the\nsame batch (`M = k[1]-k[0]+1`), and the output tensor is:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n padding_value ; otherwise\n```\nwhere `d = n - m`, `diag_index = [k] - d`, and\n`index_in_diag = n - max(d, 0) + offset`.\n\n`offset` is zero except when the alignment of the diagonal is to the right.\n```\noffset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n```\nwhere `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\nFor example:\n\n```\n# The main diagonal.\ndiagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)\n [5, 6, 7, 8]])\ntf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]],\n [[5, 0, 0, 0],\n [0, 6, 0, 0],\n [0, 0, 7, 0],\n [0, 0, 0, 8]]]\n\n# A superdiagonal (per batch).\ndiagonal = np.array([[1, 2, 3], # Input shape: (2, 3)\n [4, 5, 6]])\ntf.matrix_diag(diagonal, k = 1)\n ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)\n [0, 0, 2, 0],\n [0, 0, 0, 3],\n [0, 0, 0, 0]],\n [[0, 4, 0, 0],\n [0, 0, 5, 0],\n [0, 0, 0, 6],\n [0, 0, 0, 0]]]\n\n# A tridiagonal band (per batch).\ndiagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 2, 3],\n [6, 7, 
9],\n [9, 1, 0]]])\ntf.matrix_diag(diagonals, k = (-1, 1))\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n# LEFT_RIGHT alignment.\ndiagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)\n [1, 2, 3],\n [0, 4, 5]],\n [[2, 3, 0],\n [6, 7, 9],\n [0, 9, 1]]])\ntf.matrix_diag(diagonals, k = (-1, 1), align=\"LEFT_RIGHT\")\n ==> [[[1, 8, 0], # Output shape: (2, 3, 3)\n [4, 2, 9],\n [0, 5, 3]],\n [[6, 2, 0],\n [9, 7, 3],\n [0, 1, 9]]]\n\n# Rectangular matrix.\ndiagonal = np.array([1, 2]) # Input shape: (2)\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)\n ==> [[0, 0, 0, 0], # Output shape: (3, 4)\n [1, 0, 0, 0],\n [0, 2, 0, 0]]\n\n# Rectangular matrix with inferred num_cols and padding_value = 9.\ntf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)\n ==> [[9, 9], # Output shape: (3, 2)\n [1, 9],\n [9, 2]]\n\n```", + "inputs": [ + { + "description": "Rank `r`, where `r >= 1`", + "name": "diagonal", + "typeAttr": "T" + }, + { + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "name": "k", + "type": 3 + }, + { + "description": "The number of rows of the output matrix. If it is not provided, the op assumes\nthe output matrix is a square matrix and infers the matrix size from k and the\ninnermost dimension of `diagonal`.", + "name": "num_rows", + "type": 3 + }, + { + "description": "The number of columns of the output matrix. 
If it is not provided, the op\nassumes the output matrix is a square matrix and infers the matrix size from\nk and the innermost dimension of `diagonal`.", + "name": "num_cols", + "type": 3 + }, + { + "description": "The number to fill the area outside the specified diagonal band with.\nDefault is 0.", + "name": "padding_value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a batched diagonal tensor with given batched diagonal values." + } + }, + { + "name": "MatrixExponential", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Deprecated, use python implementation tf.linalg.matrix_exponential." + } + }, + { + "name": "MatrixInverse", + "schema": { + "attributes": [ + { + "default": false, + "name": "adjoint", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "adjoints (conjugate transposes).\n\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor of the same shape as the input\ncontaining the inverse for all input submatrices `[..., :, :]`.\n\nThe op uses LU decomposition with partial pivoting to compute the inverses.\n\nIf a matrix is not invertible there is no guarantee what the op does. 
It\nmay detect the condition and raise an exception or it may simply return a\ngarbage result.", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[..., M, M]`.\n\n@compatibility(numpy)\nEquivalent to np.linalg.inv\n@end_compatibility", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the inverse of one or more square invertible matrices or their" + } + }, + { + "name": "MatrixLogarithm", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "\n\\\\(log(exp(A)) = A\\\\)\n\nThis op is only defined for complex matrices. If A is positive-definite and\nreal, then casting to a complex matrix, taking the logarithm and casting back\nto a real matrix will give the correct result.\n\nThis function computes the matrix logarithm using the Schur-Parlett algorithm.\nDetails of the algorithm can be found in Section 11.6.2 of:\nNicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.\nISBN 978-0-898716-46-7.\n\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. 
The output is a tensor of the same shape as the input\ncontaining the exponential for all input submatrices `[..., :, :]`.", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[..., M, M]`.\n\n@compatibility(scipy)\nEquivalent to scipy.linalg.logm\n@end_compatibility", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the matrix logarithm of one or more square matrices:" + } + }, + { + "name": "MatrixSetDiag", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Given `input` and `diagonal`, this operation returns a tensor with the\nsame shape and values as `input`, except for the main diagonal of the\ninnermost matrices. These will be overwritten by the values in `diagonal`.\n\nThe output is computed as follows:\n\nAssume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has\n`k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a\ntensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:\n\n * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.\n * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.", + "inputs": [ + { + "description": "Rank `k+1`, where `k >= 1`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Rank `k`, where `k >= 1`.", + "name": "diagonal", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Rank `k+1`, with `output.shape = input.shape`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a batched matrix tensor with new batched diagonal values." + } + }, + { + "name": "MatrixSetDiagV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Given `input` and `diagonal`, this operation returns a tensor with the\nsame shape and values as `input`, except for the specified diagonals of the\ninnermost matrices. 
These will be overwritten by the values in `diagonal`.\n\n`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or\n`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.\nOtherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.\n`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.\n`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n\nThe output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`.\nIf `k` is scalar or `k[0] == k[1]`:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\n\nOtherwise,\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\nwhere `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.\n\nFor example:\n\n```\n# The main diagonal.\ninput = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)\n [7, 7, 7, 7],\n [7, 7, 7, 7]],\n [[7, 7, 7, 7],\n [7, 7, 7, 7],\n [7, 7, 7, 7]]])\ndiagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)\n [4, 5, 6]])\ntf.matrix_set_diag(diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [7, 2, 7, 7],\n [7, 7, 3, 7]],\n [[4, 7, 7, 7],\n [7, 5, 7, 7],\n [7, 7, 6, 7]]]\n\n# A superdiagonal (per batch).\ntf.matrix_set_diag(diagonal, k = 1)\n ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)\n [7, 7, 2, 7],\n [7, 7, 7, 3]],\n [[7, 4, 7, 7],\n [7, 7, 5, 7],\n [7, 7, 7, 6]]]\n\n# A band of diagonals.\ndiagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3)\n [4, 5, 0]],\n [[6, 1, 2],\n [3, 4, 0]]])\ntf.matrix_set_diag(diagonals, k = (-1, 0))\n ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [4, 2, 7, 7],\n [0, 5, 3, 7]],\n [[6, 7, 7, 7],\n [3, 1, 7, 7],\n [7, 4, 2, 7]]]\n\n```", + "inputs": [ + { + "description": "Rank `r+1`, where `r >= 
1`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.\n`k >= 1`.", + "name": "diagonal", + "typeAttr": "T" + }, + { + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "name": "k", + "type": 3 + } + ], + "outputs": [ + { + "description": "Rank `r+1`, with `output.shape = input.shape`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a batched matrix tensor with new batched diagonal values." + } + }, + { + "name": "MatrixSetDiagV3", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "RIGHT_LEFT", + "description": "Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is\na string specifying how superdiagonals and subdiagonals should be aligned,\nrespectively. There are four possible alignments: \"RIGHT_LEFT\" (default),\n\"LEFT_RIGHT\", \"LEFT_LEFT\", and \"RIGHT_RIGHT\". \"RIGHT_LEFT\" aligns superdiagonals\nto the right (left-pads the row) and subdiagonals to the left (right-pads the\nrow). It is the packing format LAPACK uses. cuSPARSE uses \"LEFT_RIGHT\", which is\nthe opposite alignment. Must be one of the following: `LEFT_RIGHT`, `RIGHT_LEFT`, `LEFT_LEFT`, `RIGHT_RIGHT`.", + "name": "align", + "type": "string" + } + ], + "description": "Given `input` and `diagonal`, this operation returns a tensor with the\nsame shape and values as `input`, except for the specified diagonals of the\ninnermost matrices. These will be overwritten by the values in `diagonal`.\n\n`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. 
When `k` is scalar or\n`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.\nOtherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.\n`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.\n`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,\n`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`\n\nThe output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`.\nIf `k` is scalar or `k[0] == k[1]`:\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\n\nOtherwise,\n\n```\noutput[i, j, ..., l, m, n]\n = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]\n input[i, j, ..., l, m, n] ; otherwise\n```\nwhere `d = n - m`, `diag_index = k[1] - d`, and\n`index_in_diag = n - max(d, 0) + offset`.\n\n`offset` is zero except when the alignment of the diagonal is to the right.\n```\noffset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}\n and `d >= 0`) or\n (`align` in {LEFT_RIGHT, RIGHT_RIGHT}\n and `d <= 0`)\n 0 ; otherwise\n```\nwhere `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.\n\nFor example:\n\n```\n# The main diagonal.\ninput = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4)\n [7, 7, 7, 7],\n [7, 7, 7, 7]],\n [[7, 7, 7, 7],\n [7, 7, 7, 7],\n [7, 7, 7, 7]]])\ndiagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3)\n [4, 5, 6]])\ntf.matrix_set_diag(input, diagonal)\n ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4)\n [7, 2, 7, 7],\n [7, 7, 3, 7]],\n [[4, 7, 7, 7],\n [7, 5, 7, 7],\n [7, 7, 6, 7]]]\n\n# A superdiagonal (per batch).\ntf.matrix_set_diag(input, diagonal, k = 1)\n ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4)\n [7, 7, 2, 7],\n [7, 7, 7, 3]],\n [[7, 4, 7, 7],\n [7, 7, 5, 7],\n [7, 7, 7, 6]]]\n\n# A band of diagonals.\ndiagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [4, 5, 0]],\n [[0, 
1, 2],\n [5, 6, 4],\n [6, 1, 2],\n [3, 4, 0]]])\ntf.matrix_set_diag(input, diagonals, k = (-1, 2))\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n# LEFT_RIGHT alignment.\ndiagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3)\n [6, 5, 8],\n [1, 2, 3],\n [0, 4, 5]],\n [[1, 2, 0],\n [5, 6, 4],\n [6, 1, 2],\n [0, 3, 4]]])\ntf.matrix_set_diag(input, diagonals, k = (-1, 2), align=\"LEFT_RIGHT\")\n ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4)\n [4, 2, 5, 1],\n [7, 5, 3, 8]],\n [[6, 5, 1, 7],\n [3, 1, 6, 2],\n [7, 4, 2, 4]]]\n\n```", + "inputs": [ + { + "description": "Rank `r+1`, where `r >= 1`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.\n`k >= 1`.", + "name": "diagonal", + "typeAttr": "T" + }, + { + "description": "Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main\ndiagonal, and negative value means subdiagonals. `k` can be a single integer\n(for a single diagonal) or a pair of integers specifying the low and high ends\nof a matrix band. `k[0]` must not be larger than `k[1]`.", + "name": "k", + "type": 3 + } + ], + "outputs": [ + { + "description": "Rank `r+1`, with `output.shape = input.shape`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a batched matrix tensor with new batched diagonal values." + } + }, + { + "name": "MatrixSolve", + "schema": { + "attributes": [ + { + "default": false, + "description": "Boolean indicating whether to solve with `matrix` or its (block-wise)\nadjoint.", + "name": "adjoint", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "`Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. 
`Rhs` is a tensor of shape `[..., M, K]`. The `output` is\na tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix\nsatisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.\nIf `adjoint` is `True` then each output matrix satisfies\n`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "matrix", + "typeAttr": "T" + }, + { + "description": "Shape is `[..., M, K]`.", + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[..., M, K]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Solves systems of linear equations." + } + }, + { + "name": "MatrixSolveLs", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": true, + "name": "fast", + "type": "boolean" + } + ], + "description": "`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same\ntype as `matrix` and shape `[..., M, K]`.\nThe output is a tensor shape `[..., N, K]` where each output matrix solves\neach of the equations\n`matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`\nin the least squares sense.\n\nWe use the following notation for (complex) matrix and right-hand sides\nin the batch:\n\n`matrix`=\\\\(A \\in \\mathbb{C}^{m \\times n}\\\\),\n`rhs`=\\\\(B \\in \\mathbb{C}^{m \\times k}\\\\),\n`output`=\\\\(X \\in \\mathbb{C}^{n \\times k}\\\\),\n`l2_regularizer`=\\\\(\\lambda \\in \\mathbb{R}\\\\).\n\nIf `fast` is `True`, then the solution is computed by solving the normal\nequations using Cholesky decomposition. 
Specifically, if \\\\(m \\ge n\\\\) then\n\\\\(X = (A^H A + \\lambda I)^{-1} A^H B\\\\), which solves the least-squares\nproblem \\\\(X = \\mathrm{argmin}_{Z \\in \\Re^{n \\times k} } ||A Z - B||_F^2 + \\lambda ||Z||_F^2\\\\).\nIf \\\\(m \\lt n\\\\) then `output` is computed as\n\\\\(X = A^H (A A^H + \\lambda I)^{-1} B\\\\), which (for \\\\(\\lambda = 0\\\\)) is the\nminimum-norm solution to the under-determined linear system, i.e.\n\\\\(X = \\mathrm{argmin}_{Z \\in \\mathbb{C}^{n \\times k} } ||Z||_F^2 \\\\),\nsubject to \\\\(A Z = B\\\\). Notice that the fast path is only numerically stable\nwhen \\\\(A\\\\) is numerically full rank and has a condition number\n\\\\(\\mathrm{cond}(A) \\lt \\frac{1}{\\sqrt{\\epsilon_{mach} } }\\\\) or \\\\(\\lambda\\\\) is\nsufficiently large.\n\nIf `fast` is `False` an algorithm based on the numerically robust complete\northogonal decomposition is used. This computes the minimum-norm\nleast-squares solution, even when \\\\(A\\\\) is rank deficient. This path is\ntypically 6-7 times slower than the fast path. If `fast` is `False` then\n`l2_regularizer` is ignored.", + "inputs": [ + { + "description": "Shape is `[..., M, N]`.", + "name": "matrix", + "typeAttr": "T" + }, + { + "description": "Shape is `[..., M, K]`.", + "name": "rhs", + "typeAttr": "T" + }, + { + "description": "Scalar tensor.\n\n@compatibility(numpy)\nEquivalent to np.linalg.lstsq\n@end_compatibility", + "name": "l2_regularizer", + "type": 2 + } + ], + "outputs": [ + { + "description": "Shape is `[..., N, K]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Solves one or more linear least-squares problems." + } + }, + { + "name": "MatrixSquareRoot", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "matmul(sqrtm(A), sqrtm(A)) = A\n\nThe input matrix should be invertible. 
If the input matrix is real, it should\nhave no eigenvalues which are real and negative (pairs of complex conjugate\neigenvalues are allowed).\n\nThe matrix square root is computed by first reducing the matrix to\nquasi-triangular form with the real Schur decomposition. The square root\nof the quasi-triangular matrix is then computed directly. Details of\nthe algorithm can be found in: Nicholas J. Higham, \"Computing real\nsquare roots of a real matrix\", Linear Algebra Appl., 1987.\n\nThe input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices. The output is a tensor of the same shape as the input\ncontaining the matrix square root for all input submatrices `[..., :, :]`.", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[..., M, M]`.\n\n@compatibility(scipy)\nEquivalent to scipy.linalg.sqrtm\n@end_compatibility", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the matrix square root of one or more square matrices:" + } + }, + { + "name": "MatrixTriangularSolve", + "schema": { + "attributes": [ + { + "default": true, + "description": "Boolean indicating whether the innermost matrices in `matrix` are\nlower or upper triangular.", + "name": "lower", + "type": "boolean" + }, + { + "default": false, + "description": "Boolean indicating whether to solve with `matrix` or its (block-wise)\n adjoint.\n\n@compatibility(numpy)\nEquivalent to scipy.linalg.solve_triangular\n@end_compatibility", + "name": "adjoint", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "\n`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form\nsquare matrices. 
If `lower` is `True` then the strictly upper triangular part\nof each inner-most matrix is assumed to be zero and not accessed.\nIf `lower` is False then the strictly lower triangular part of each inner-most\nmatrix is assumed to be zero and not accessed.\n`rhs` is a tensor of shape `[..., M, N]`.\n\nThe output is a tensor of shape `[..., M, N]`. If `adjoint` is\n`True` then the innermost matrices in `output` satisfy matrix equations\n`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.\nIf `adjoint` is `False` then the strictly then the innermost matrices in\n`output` satisfy matrix equations\n`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.\n\nNote, the batch shapes for the inputs only need to broadcast.\n\nExample:\n```python\n\na = tf.constant([[3, 0, 0, 0],\n [2, 1, 0, 0],\n [1, 0, 1, 0],\n [1, 1, 1, 1]], dtype=tf.float32)\n\nb = tf.constant([[4],\n [2],\n [4],\n [2]], dtype=tf.float32)\n\nx = tf.linalg.triangular_solve(a, b, lower=True)\nx\n# \n\n# in python3 one can use `a@x`\ntf.matmul(a, x)\n# \n```", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "matrix", + "typeAttr": "T" + }, + { + "description": "Shape is `[..., M, K]`.", + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[..., M, K]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Solves systems of linear equations with upper or lower triangular matrices by backsubstitution." 
+ } + }, + { + "name": "Max", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the maximum of elements across dimensions of a tensor." + } + }, + { + "name": "MaxIntraOpParallelismDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "Identifies the maximum intra-op parallelism to use.", + "name": "max_intra_op_parallelism", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that overrides the maximum intra-op parallelism." 
+ } + }, + { + "name": "MaxPool", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.", + "name": "T", + "type": "type" + }, + { + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "name": "data_format", + "type": "string" + } + ], + "category": "Pool", + "inputs": [ + { + "description": "4-D input to pool over.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The max pooled output tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Performs max pooling on the input." + } + }, + { + "name": "MaxPool3D", + "schema": { + "attributes": [ + { + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. 
Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "Shape `[batch, depth, rows, cols, channels]` tensor to pool over.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The max pooled output tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Performs 3D max pooling on the input." + } + }, + { + "name": "MaxPool3DGrad", + "schema": { + "attributes": [ + { + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. 
With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`.", + "name": "TInput", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "orig_input", + "typeAttr": "TInput" + }, + { + "description": "The original output tensor.", + "name": "orig_output", + "typeAttr": "TInput" + }, + { + "description": "Output backprop of shape `[batch, depth, rows, cols, channels]`.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients of 3D max pooling function." + } + }, + { + "name": "MaxPool3DGradGrad", + "schema": { + "attributes": [ + { + "description": "1-D tensor of length 5. The size of the window for each dimension of\nthe input tensor. Must have `ksize[0] = ksize[4] = 1`.", + "minimum": 5, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "1-D tensor of length 5. The stride of the sliding window for each\ndimension of `input`. Must have `strides[0] = strides[4] = 1`.", + "minimum": 5, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NDHWC", + "description": "The data format of the input and output data. 
With the\ndefault format \"NDHWC\", the data is stored in the order of:\n [batch, in_depth, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCDHW\", the data storage order is:\n [batch, in_channels, in_depth, in_height, in_width]. Must be one of the following: `NDHWC`, `NCDHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "orig_input", + "typeAttr": "T" + }, + { + "description": "The original output tensor.", + "name": "orig_output", + "typeAttr": "T" + }, + { + "description": "Output backprop of shape `[batch, depth, rows, cols, channels]`.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Gradients of gradients w.r.t. the input to `max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes second-order gradients of the maxpooling function." + } + }, + { + "name": "MaxPoolGrad", + "schema": { + "attributes": [ + { + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. 
Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "orig_input", + "typeAttr": "T" + }, + { + "description": "The original output tensor.", + "name": "orig_output", + "typeAttr": "T" + }, + { + "description": "4-D. Gradients w.r.t. the output of `max_pool`.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Gradients w.r.t. the input to `max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients of the maxpooling function." + } + }, + { + "name": "MaxPoolGradGrad", + "schema": { + "attributes": [ + { + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. 
Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "orig_input", + "typeAttr": "T" + }, + { + "description": "The original output tensor.", + "name": "orig_output", + "typeAttr": "T" + }, + { + "description": "4-D. Gradients of gradients w.r.t. the input of `max_pool`.", + "name": "grad", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Gradients of gradients w.r.t. the input to `max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes second-order gradients of the maxpooling function." + } + }, + { + "name": "MaxPoolGradGradV2", + "schema": { + "attributes": [ + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "orig_input", + "typeAttr": "T" + }, + { + "description": "The original output tensor.", + "name": "orig_output", + "typeAttr": "T" + }, + { + "description": "4-D. 
Gradients of gradients w.r.t. the input of `max_pool`.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "The size of the window for each dimension of the input tensor.", + "name": "ksize", + "type": 3 + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "name": "strides", + "type": 3 + } + ], + "outputs": [ + { + "description": "Gradients of gradients w.r.t. the input to `max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes second-order gradients of the maxpooling function." + } + }, + { + "name": "MaxPoolGradGradWithArgmax", + "schema": { + "attributes": [ + { + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": false, + "description": "Whether to include batch dimension in flattened index of `argmax`.", + "name": "include_batch_in_index", + "type": "boolean" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Targmax", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. 
the\ninput of `max_pool`.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "The indices of the maximum values chosen for each output of `max_pool`.", + "name": "argmax", + "typeAttr": "Targmax" + } + ], + "outputs": [ + { + "description": "Gradients of gradients w.r.t. the input of `max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes second-order gradients of the maxpooling function." + } + }, + { + "name": "MaxPoolGradV2", + "schema": { + "attributes": [ + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`.", + "name": "data_format", + "type": "string" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "orig_input", + "typeAttr": "T" + }, + { + "description": "The original output tensor.", + "name": "orig_output", + "typeAttr": "T" + }, + { + "description": "4-D. Gradients w.r.t. the output of `max_pool`.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "The size of the window for each dimension of the input tensor.", + "name": "ksize", + "type": 3 + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "name": "strides", + "type": 3 + } + ], + "outputs": [ + { + "description": "Gradients w.r.t. 
the input to `max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients of the maxpooling function." + } + }, + { + "name": "MaxPoolGradWithArgmax", + "schema": { + "attributes": [ + { + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": false, + "description": "Whether to include batch dimension in flattened index of `argmax`.", + "name": "include_batch_in_index", + "type": "boolean" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Targmax", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the\noutput of `max_pool`.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "The indices of the maximum values chosen for each output of `max_pool`.", + "name": "argmax", + "typeAttr": "Targmax" + } + ], + "outputs": [ + { + "description": "Gradients w.r.t. the input of `max_pool`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients of the maxpooling function." 
+ } + }, + { + "name": "MaxPoolV2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `qint8`.", + "name": "T", + "type": "type" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": "NHWC", + "description": "Specify the data format of the input and output data. With the\ndefault format \"NHWC\", the data is stored in the order of:\n [batch, in_height, in_width, in_channels].\nAlternatively, the format could be \"NCHW\", the data storage order of:\n [batch, in_channels, in_height, in_width]. Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "name": "data_format", + "type": "string" + } + ], + "category": "Pool", + "inputs": [ + { + "description": "4-D input to pool over.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The size of the window for each dimension of the input tensor.", + "name": "ksize", + "type": 3 + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "name": "strides", + "type": 3 + } + ], + "outputs": [ + { + "description": "The max pooled output tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Performs max pooling on the input." 
+ } + }, + { + "name": "MaxPoolWithArgmax", + "schema": { + "attributes": [ + { + "description": "The size of the window for each dimension of the input tensor.", + "minimum": 4, + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the\ninput tensor.", + "minimum": 4, + "name": "strides", + "type": "int64[]" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Targmax", + "type": "type" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": false, + "description": "Whether to include batch dimension in flattened index of `argmax`.", + "name": "include_batch_in_index", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The indices in `argmax` are flattened, so that a maximum value at position\n`[b, y, x, c]` becomes flattened index:\n`(y * width + x) * channels + c` if `include_batch_in_index` is False;\n`((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.\n\nThe indices returned are always in `[0, height) x [0, width)` before flattening,\neven if padding is involved and the mathematically correct answer is outside\n(either negative or too large). This is a bug, but fixing it is difficult to do\nin a safe backwards compatible way, especially due to flattening.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`. Input to pool over.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The max pooled output tensor.", + "name": "output", + "typeAttr": "T" + }, + { + "description": "4-D. 
The flattened indices of the max values chosen for each output.", + "name": "argmax", + "typeAttr": "Targmax" + } + ], + "summary": "Performs max pooling on the input and outputs both max values and indices." + } + }, + { + "name": "Maximum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int16`, `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Maximum` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns the max of x and y (i.e. x > y ? x : y) element-wise." + } + }, + { + "name": "Mean", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The dimensions to reduce. 
Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the mean of elements across dimensions of a tensor." + } + }, + { + "name": "Merge", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "description": "`Merge` waits for at least one of the tensors in `inputs` to become available.\nIt is usually combined with `Switch` to implement branching.\n\n`Merge` forwards the first tensor to become available to `output`, and sets\n`value_index` to its index in `inputs`.", + "inputs": [ + { + "description": "The input tensors, exactly one of which will become available.", + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Will be set to the available input tensor.", + "name": "output", + "typeAttr": "T" + }, + { + "description": "The index of the chosen input tensor in `inputs`.", + "name": "value_index", + "type": 3 + } + ], + "summary": "Forwards the value of an available tensor from `inputs` to `output`." + } + }, + { + "name": "MergeSummary", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "description": "This op creates a\n[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\nprotocol buffer that contains the union of all the values in the input\nsummaries.\n\nWhen the Op is run, it reports an `InvalidArgument` error if multiple values\nin the summaries to merge use the same tag.", + "inputs": [ + { + "description": "Can be of any shape. Each must contain serialized `Summary` protocol\nbuffers.", + "name": "inputs", + "numberAttr": "N", + "type": 7 + } + ], + "outputs": [ + { + "description": "Scalar. 
Serialized `Summary` protocol buffer.", + "name": "summary", + "type": 7 + } + ], + "summary": "Merges summaries." + } + }, + { + "name": "MergeV2Checkpoints", + "schema": { + "attributes": [ + { + "default": true, + "description": "see above.", + "name": "delete_old_dirs", + "type": "boolean" + } + ], + "description": "result is one logical checkpoint, with one physical metadata file and renamed\ndata files.\n\nIntended for \"grouping\" multiple checkpoints in a sharded checkpoint setup.\n\nIf delete_old_dirs is true, attempts to delete recursively the dirname of each\npath in the input checkpoint_prefixes. This is useful when those paths are non\nuser-facing temporary locations.", + "inputs": [ + { + "description": "prefixes of V2 checkpoints to merge.", + "name": "checkpoint_prefixes", + "type": 7 + }, + { + "description": "scalar. The desired final prefix. Allowed to be the same\nas one of the checkpoint_prefixes.", + "name": "destination_prefix", + "type": 7 + } + ], + "summary": "V2 format specific: merges the metadata files of sharded checkpoints. The" + } + }, + { + "name": "Mfcc", + "schema": { + "attributes": [ + { + "default": 4000.0, + "description": "The highest frequency to use when calculating the\nceptstrum.", + "name": "upper_frequency_limit", + "type": "float32" + }, + { + "default": 20.0, + "description": "The lowest frequency to use when calculating the\nceptstrum.", + "name": "lower_frequency_limit", + "type": "float32" + }, + { + "default": 40, + "description": "Resolution of the Mel bank used internally.", + "name": "filterbank_channel_count", + "type": "int64" + }, + { + "default": 13, + "description": "How many output channels to produce per time slice.", + "name": "dct_coefficient_count", + "type": "int64" + } + ], + "description": "Mel Frequency Cepstral Coefficients are a way of representing audio data that's\nbeen effective as an input feature for machine learning. 
They are created by\ntaking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the\nhigher frequencies that are less significant to the human ear. They have a long\nhistory in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum\nis a good resource to learn more.", + "inputs": [ + { + "description": "Typically produced by the Spectrogram op, with magnitude_squared\nset to true.", + "name": "spectrogram", + "type": 1 + }, + { + "description": "How many samples per second the source audio used.", + "name": "sample_rate", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ], + "summary": "Transforms a spectrogram into a form that's useful for speech recognition." + } + }, + { + "name": "Min", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The dimensions to reduce. 
Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the minimum of elements across dimensions of a tensor." + } + }, + { + "name": "Minimum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int16`, `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Minimum` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns the min of x and y (i.e. x < y ? x : y) element-wise." + } + }, + { + "name": "MirrorPad", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tpaddings", + "type": "type" + }, + { + "description": "Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions\ndo not include the borders, while in symmetric mode the padded regions\ndo include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`\nis `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and\nit is `[1, 2, 3, 3, 2]` in symmetric mode. Must be one of the following: `REFLECT`, `SYMMETRIC`.", + "name": "mode", + "type": "string" + } + ], + "description": "This operation pads a `input` with mirrored values according to the `paddings`\nyou specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is\nthe rank of `input`. 
For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many values to add before the contents of `input` in that dimension, and\n`paddings[D, 1]` indicates how many values to add after the contents of `input`\nin that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater\nthan `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true\n(if false, respectively).\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 2, 3], [4, 5, 6]].\n# 'paddings' is [[1, 1]], [2, 2]].\n# 'mode' is SYMMETRIC.\n# rank of 't' is 2.\npad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]\n [2, 1, 1, 2, 3, 3, 2]\n [5, 4, 4, 5, 6, 6, 5]\n [5, 4, 4, 5, 6, 6, 5]]\n```", + "inputs": [ + { + "description": "The input tensor to be padded.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`.", + "name": "paddings", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "description": "The padded tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Pads a tensor with mirrored values." + } + }, + { + "name": "MirrorPadGrad", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tpaddings", + "type": "type" + }, + { + "description": "The mode used in the `MirrorPad` op. Must be one of the following: `REFLECT`, `SYMMETRIC`.", + "name": "mode", + "type": "string" + } + ], + "description": "This operation folds the padded areas of `input` by `MirrorPad` according to the\n`paddings` you specify. 
`paddings` must be the same as `paddings` argument\ngiven to the corresponding `MirrorPad` op.\n\nThe folded size of each dimension D of the output is:\n\n`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].\n# 'paddings' is [[0, 1]], [0, 1]].\n# 'mode' is SYMMETRIC.\n# rank of 't' is 2.\npad(t, paddings) ==> [[ 1, 5]\n [11, 28]]\n```", + "inputs": [ + { + "description": "The input tensor to be folded.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A two-column matrix specifying the padding sizes. The number of\nrows must be the same as the rank of `input`.", + "name": "paddings", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "description": "The folded tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor." + } + }, + { + "name": "MlirPassthroughOp", + "schema": { + "attributes": [ + { + "name": "mlir_module", + "type": "string" + }, + { + "minimum": 0, + "name": "Tinputs", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Toutputs", + "type": "type[]" + } + ], + "description": "This operation does not have an associated kernel and is not intended to be\nexecuted in a regular TensorFlow session. Instead it is intended to be used for\ntesting or for special case where a user intends to pass custom MLIR computation\nthrough a TensorFlow graph with the intent of having custom tooling processing\nit downstream (when targeting a different environment, like TensorFlow lite for\nexample).\nThe MLIR module is expected to have a main() function that will be used as an\nentry point. 
The inputs to the operations will be passed as argument to the\nmain() function and the returned values of the main function mapped to the\noutputs.\nExample usage:\n\n```\nimport tensorflow as tf\nfrom tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op\n\nmlir_module = '''python\nfunc @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {\n %add = \"magic.op\"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>\n return %ret : tensor<10x10xf32>\n}\n'''\n\n@tf.function\ndef foo(x, y):\n return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])\n\ngraph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()\n```", + "inputs": [ + { + "name": "inputs", + "typeListAttr": "Tinputs" + } + ], + "outputs": [ + { + "name": "outputs", + "typeListAttr": "Toutputs" + } + ], + "summary": "Wraps an arbitrary MLIR computation expressed as a module with a main() function." + } + }, + { + "name": "Mod", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`, `float16`, `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "the result here is consistent with a truncating divide. E.g.\n`tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.\n\n*NOTE*: `Mod` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns element-wise remainder of division. 
This emulates C semantics in that" + } + }, + { + "name": "ModelDataset", + "schema": { + "attributes": [ + { + "default": 0, + "name": "algorithm", + "type": "int64" + }, + { + "default": 0, + "name": "cpu_budget", + "type": "int64" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Identity transformation that models performance.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Identity transformation that models performance." + } + }, + { + "name": "Mul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Mul` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x * y element-wise." + } + }, + { + "name": "MulNoNan", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `MulNoNan` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x * y element-wise. 
Returns zero if y is zero, even if x if infinite or NaN." + } + }, + { + "name": "MultiDeviceIterator", + "schema": { + "attributes": [ + { + "description": "A list of devices the iterator works across.", + "minimum": 1, + "name": "devices", + "type": "string[]" + }, + { + "description": "If non-empty, this resource will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "description": "If non-empty, this resource is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "description": "The type list for the return values.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "description": "The list of shapes being produced.", + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "outputs": [ + { + "description": "Handle to the resource created.", + "name": "handle", + "type": 20 + } + ], + "summary": "Creates a MultiDeviceIterator resource." + } + }, + { + "name": "MultiDeviceIteratorFromStringHandle", + "schema": { + "attributes": [ + { + "default": [], + "description": "The type list for the return values.", + "minimum": 0, + "name": "output_types", + "type": "type[]" + }, + { + "default": [], + "description": "The list of shapes being produced.", + "minimum": 0, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "String representing the resource.", + "name": "string_handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "A MultiDeviceIterator resource.", + "name": "multi_device_iterator", + "type": 20 + } + ], + "summary": "Generates a MultiDeviceIterator resource from its provided string handle." 
+ } + }, + { + "name": "MultiDeviceIteratorGetNextFromShard", + "schema": { + "attributes": [ + { + "description": "The type list for the return values.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "description": "The list of shapes being produced.", + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A MultiDeviceIterator resource.", + "name": "multi_device_iterator", + "type": 20 + }, + { + "description": "Integer representing which shard to fetch data for.", + "name": "shard_num", + "type": 3 + }, + { + "description": "Which incarnation of the MultiDeviceIterator is running.", + "name": "incarnation_id", + "type": 9 + } + ], + "outputs": [ + { + "description": "Result of the get_next on the dataset.", + "name": "components", + "typeListAttr": "output_types" + } + ], + "summary": "Gets next element for the provided shard number." + } + }, + { + "name": "MultiDeviceIteratorInit", + "schema": { + "inputs": [ + { + "description": "Dataset to be iterated upon.", + "name": "dataset", + "type": 21 + }, + { + "description": "A MultiDeviceIteratorResource.", + "name": "multi_device_iterator", + "type": 20 + }, + { + "description": "The maximum size of the host side per device buffer to keep.", + "name": "max_buffer_size", + "type": 9 + } + ], + "outputs": [ + { + "description": "An int64 indicating which incarnation of the MultiDeviceIterator\nis running.", + "name": "incarnation_id", + "type": 9 + } + ], + "summary": "Initializes the multi device iterator with the given dataset." + } + }, + { + "name": "MultiDeviceIteratorToStringHandle", + "schema": { + "inputs": [ + { + "description": "A MultiDeviceIterator resource.", + "name": "multi_device_iterator", + "type": 20 + } + ], + "outputs": [ + { + "description": "A string representing the resource.", + "name": "string_handle", + "type": 7 + } + ], + "summary": "Produces a string handle for the given MultiDeviceIterator." 
+ } + }, + { + "name": "Multinomial", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either seed or seed2 is set to be non-zero, the internal random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "output_dtype", + "type": "type" + } + ], + "inputs": [ + { + "description": "2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes.", + "name": "logits", + "typeAttr": "T" + }, + { + "description": "0-D. Number of independent samples to draw for each row slice.", + "name": "num_samples", + "type": 3 + } + ], + "outputs": [ + { + "description": "2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`.", + "name": "output", + "typeAttr": "output_dtype" + } + ], + "summary": "Draws samples from a multinomial distribution." 
+ } + }, + { + "name": "MutableDenseHashTable", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "description": "The shape of each value.", + "name": "value_shape", + "type": "shape" + }, + { + "default": 131072, + "description": "The initial number of hash table buckets. Must be a power\nto 2.", + "name": "initial_num_buckets", + "type": "int64" + }, + { + "default": 0.800000011920929, + "description": "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1.", + "name": "max_load_factor", + "type": "float32" + } + ], + "description": "It uses \"open addressing\" with quadratic reprobing to resolve\ncollisions.\n\nThis op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "inputs": [ + { + "description": "The key used to represent empty key buckets internally. Must not\nbe used in insert or lookup operations.", + "name": "empty_key", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "description": "Handle to a table.", + "isRef": true, + "name": "table_handle", + "type": 7 + } + ], + "summary": "Creates an empty hash table that uses tensors as the backing store." 
+ } + }, + { + "name": "MutableDenseHashTableV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "description": "The shape of each value.", + "name": "value_shape", + "type": "shape" + }, + { + "default": 131072, + "description": "The initial number of hash table buckets. Must be a power\nto 2.", + "name": "initial_num_buckets", + "type": "int64" + }, + { + "default": 0.800000011920929, + "description": "The maximum ratio between number of entries and number of\nbuckets before growing the table. Must be between 0 and 1.", + "name": "max_load_factor", + "type": "float32" + } + ], + "description": "It uses \"open addressing\" with quadratic reprobing to resolve\ncollisions.\n\nThis op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "inputs": [ + { + "description": "The key used to represent empty key buckets internally. 
Must not\nbe used in insert or lookup operations.", + "name": "empty_key", + "typeAttr": "key_dtype" + }, + { + "name": "deleted_key", + "typeAttr": "key_dtype" + } + ], + "outputs": [ + { + "description": "Handle to a table.", + "name": "table_handle", + "type": 20 + } + ], + "summary": "Creates an empty hash table that uses tensors as the backing store." + } + }, + { + "name": "MutableHashTable", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + } + ], + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "outputs": [ + { + "description": "Handle to a table.", + "isRef": true, + "name": "table_handle", + "type": 7 + } + ], + "summary": "Creates an empty hash table." 
+ } + }, + { + "name": "MutableHashTableOfTensors", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "value_shape", + "type": "shape" + } + ], + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a vector. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "outputs": [ + { + "description": "Handle to a table.", + "isRef": true, + "name": "table_handle", + "type": 7 + } + ], + "summary": "Creates an empty hash table." 
+ } + }, + { + "name": "MutableHashTableOfTensorsV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "value_shape", + "type": "shape" + } + ], + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a vector. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "outputs": [ + { + "description": "Handle to a table.", + "name": "table_handle", + "type": 20 + } + ], + "summary": "Creates an empty hash table." 
+ } + }, + { + "name": "MutableHashTableV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this table is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this table is shared under the given name across\nmultiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": false, + "description": "If true and shared_name is empty, the table is shared\nusing the node name.", + "name": "use_node_name_sharing", + "type": "boolean" + }, + { + "description": "Type of the table keys.", + "name": "key_dtype", + "type": "type" + }, + { + "description": "Type of the table values.", + "name": "value_dtype", + "type": "type" + } + ], + "description": "This op creates a mutable hash table, specifying the type of its keys and\nvalues. Each value must be a scalar. Data can be inserted into the table using\nthe insert operations. It does not support the initialization operation.", + "outputs": [ + { + "description": "Handle to a table.", + "name": "table_handle", + "type": 20 + } + ], + "summary": "Creates an empty hash table." 
+ } + }, + { + "name": "MutexLock", + "schema": { + "description": "is alive, any other request to use `MutexLock` with this mutex will wait.\n\nThis is particularly useful for creating a critical section when used in\nconjunction with `MutexLockIdentity`:\n\n```python\n\nmutex = mutex_v2(\n shared_name=handle_name, container=container, name=name)\n\ndef execute_in_critical_section(fn, *args, **kwargs):\n lock = gen_resource_variable_ops.mutex_lock(mutex)\n\n with ops.control_dependencies([lock]):\n r = fn(*args, **kwargs)\n\n with ops.control_dependencies(nest.flatten(r)):\n with ops.colocate_with(mutex):\n ensure_lock_exists = mutex_lock_identity(lock)\n\n # Make sure that if any element of r is accessed, all of\n # them are executed together.\n r = nest.map_structure(tf.identity, r)\n\n with ops.control_dependencies([ensure_lock_exists]):\n return nest.map_structure(tf.identity, r)\n```\n\nWhile `fn` is running in the critical section, no other functions which wish to\nuse this critical section may run.\n\nOften the use case is that two executions of the same graph, in parallel,\nwish to run `fn`; and we wish to ensure that only one of them executes\nat a time. This is especially important if `fn` modifies one or more\nvariables at a time.\n\nIt is also useful if two separate functions must share a resource, but we\nwish to ensure the usage is exclusive.", + "inputs": [ + { + "description": "The mutex resource to lock.", + "name": "mutex", + "type": 20 + } + ], + "outputs": [ + { + "description": "A tensor that keeps a shared pointer to a lock on the mutex;\nwhen the Tensor is destroyed, the use count on the shared pointer is decreased\nby 1. When it reaches 0, the lock is released.", + "name": "mutex_lock", + "type": 21 + } + ], + "summary": "Locks a mutex resource. The output is the lock. 
So long as the lock tensor" + } + }, + { + "name": "MutexV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The mutex resource.", + "name": "resource", + "type": 20 + } + ], + "summary": "Creates a Mutex resource that can be locked by `MutexLock`." + } + }, + { + "name": "NcclAllReduce", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `min`, `max`, `prod`, `sum`.", + "name": "reduction", + "type": "string" + }, + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "name": "num_devices", + "type": "int64" + }, + { + "name": "shared_name", + "type": "string" + } + ], + "description": "Outputs a tensor containing the reduction across all input tensors passed to ops\nwithin the same `shared_name.\n\nThe graph should be constructed so if one op runs with shared_name value `c`,\nthen `num_devices` ops will run with shared_name value `c`. Failure to do so\nwill cause the graph execution to fail to complete.\n\ninput: the input to the reduction\ndata: the value of the reduction across all `num_devices` devices.\nreduction: the reduction operation to perform.\nnum_devices: The number of devices participating in this reduction.\nshared_name: Identifier that shared between ops of the same reduction.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ], + "summary": "Outputs a tensor containing the reduction across all input tensors." 
+ } + }, + { + "name": "NcclBroadcast", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "name": "shape", + "type": "shape" + } + ], + "description": "Sends `input` to all devices that are connected to the output.\n\nThe graph should be constructed so that all ops connected to the output have a\nvalid device assignment, and the op itself is assigned one of these devices.\n\ninput: The input to the broadcast.\noutput: The same as input.\nshape: The shape of the input tensor.\n", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Sends `input` to all devices that are connected to the output." + } + }, + { + "name": "NcclReduce", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `min`, `max`, `prod`, `sum`.", + "name": "reduction", + "type": "string" + }, + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "num_devices", + "type": "int64" + } + ], + "description": "Reduces `input` from `num_devices` using `reduction` to a single device.\n\nThe graph should be constructed so that all inputs have a valid device\nassignment, and the op itself is assigned one of these devices.\n\ninput: The input to the reduction.\ndata: the value of the reduction across all `num_devices` devices.\nreduction: the reduction operation to perform.", + "inputs": [ + { + "name": "input", + "numberAttr": "num_devices", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "data", + "typeAttr": "T" + } + ], + "summary": "Reduces `input` from `num_devices` using `reduction` to a single device." 
+ } + }, + { + "name": "Ndtri", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + } + }, + { + "name": "NearestNeighbors", + "schema": { + "description": "Rows of points are assumed to be input points. Rows of centers are assumed to be\nthe list of candidate centers. For each point, the k centers that have least L2\ndistance to it are computed.", + "inputs": [ + { + "description": "Matrix of shape (n, d). Rows are assumed to be input points.", + "name": "points", + "type": 1 + }, + { + "description": "Matrix of shape (m, d). Rows are assumed to be centers.", + "name": "centers", + "type": 1 + }, + { + "description": "Number of nearest centers to return for each point. If k is larger than m, then\nonly m centers are returned.", + "name": "k", + "type": 9 + } + ], + "outputs": [ + { + "description": "Matrix of shape (n, min(m, k)). Each row contains the indices of the centers\nclosest to the corresponding point, ordered by increasing distance.", + "name": "nearest_center_indices", + "type": 9 + }, + { + "description": "Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the\ncorresponding center in nearest_center_indices.", + "name": "nearest_center_distances", + "type": 1 + } + ], + "summary": "Selects the k nearest centers for each point." 
+ } + }, + { + "name": "Neg", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = -x\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes numerical negative value element-wise." + } + }, + { + "name": "NegTrain", + "schema": { + "attributes": [ + { + "description": "Count of words in the vocabulary.", + "name": "vocab_count", + "type": "int64[]" + }, + { + "description": "Number of negative samples per example.", + "name": "num_negative_samples", + "type": "int64" + } + ], + "inputs": [ + { + "description": "input word embedding.", + "isRef": true, + "name": "w_in", + "type": 1 + }, + { + "description": "output word embedding.", + "isRef": true, + "name": "w_out", + "type": 1 + }, + { + "description": "A vector of word ids.", + "name": "examples", + "type": 3 + }, + { + "description": "A vector of word ids.", + "name": "labels", + "type": 3 + }, + { + "name": "lr", + "type": 1 + } + ], + "summary": "Training via negative sampling." + } + }, + { + "name": "NextAfter", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float64`, `float32`.", + "name": "T", + "type": "type" + } + ], + "description": "This operation returns the same result as the C++ std::nextafter function.\n\nIt can also return a subnormal number.\n\n@compatibility(cpp)\nEquivalent to C++ std::nextafter function.\n@end_compatibility", + "inputs": [ + { + "name": "x1", + "typeAttr": "T" + }, + { + "name": "x2", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns the next representable value of `x1` in the direction of `x2`, element-wise." 
+ } + }, + { + "name": "NextIteration", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The tensor to be made available to the next iteration.", + "name": "data", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The same tensor as `data`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Makes its input available to the next iteration." + } + }, + { + "name": "NoOp", + "schema": { + "summary": "Does nothing. Only useful as a placeholder for control edges." + } + }, + { + "name": "NonDeterministicInts", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 9 + }, + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "name": "shape_dtype", + "type": "type" + } + ], + "description": "This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "description": "Non-deterministic integer values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Non-deterministically generates some integers." + } + }, + { + "name": "NonMaxSuppression", + "schema": { + "attributes": [ + { + "default": 0.5, + "description": "A float representing the threshold for deciding whether boxes\noverlap too much with respect to IOU.", + "name": "iou_threshold", + "type": "float32" + } + ], + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. 
Note that this algorithm\nis agnostic to where the origin is in the coordinate system. Note that this\nalgorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. For example:\n selected_indices = tf.image.non_max_suppression(\n boxes, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "inputs": [ + { + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "name": "boxes", + "type": 1 + }, + { + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "name": "scores", + "type": 1 + }, + { + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "name": "max_output_size", + "type": 3 + } + ], + "outputs": [ + { + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "name": "selected_indices", + "type": 3 + } + ], + "summary": "Greedily selects a subset of bounding boxes in descending order of score," + } + }, + { + "name": "NonMaxSuppressionV2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T_threshold", + "type": "type" + } + ], + "description": "pruning away boxes that have high 
intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system. Note that this\nalgorithm is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\n\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. For example:\n\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "inputs": [ + { + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "name": "boxes", + "typeAttr": "T" + }, + { + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "name": "scores", + "typeAttr": "T" + }, + { + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "name": "max_output_size", + "type": 3 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "name": "iou_threshold", + "typeAttr": "T_threshold" + } + ], + "outputs": [ + { + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "name": "selected_indices", + "type": 3 + } + ], + "summary": "Greedily selects a subset 
of bounding boxes in descending order of score," + } + }, + { + "name": "NonMaxSuppressionV3", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T_threshold", + "type": "type" + } + ], + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system and more\ngenerally is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. 
For example:\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "inputs": [ + { + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "name": "boxes", + "typeAttr": "T" + }, + { + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "name": "scores", + "typeAttr": "T" + }, + { + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "name": "max_output_size", + "type": 3 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "name": "iou_threshold", + "typeAttr": "T_threshold" + }, + { + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "name": "score_threshold", + "typeAttr": "T_threshold" + } + ], + "outputs": [ + { + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "name": "selected_indices", + "type": 3 + } + ], + "summary": "Greedily selects a subset of bounding boxes in descending order of score," + } + }, + { + "name": "NonMaxSuppressionV4", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T_threshold", + "type": "type" + }, + { + "default": false, + "description": "If true, the output `selected_indices` is padded to be of length\n`max_output_size`. 
Defaults to false.", + "name": "pad_to_max_output_size", + "type": "boolean" + } + ], + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system and more\ngenerally is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. 
For example:\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "inputs": [ + { + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "name": "boxes", + "typeAttr": "T" + }, + { + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "name": "scores", + "typeAttr": "T" + }, + { + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "name": "max_output_size", + "type": 3 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "name": "iou_threshold", + "typeAttr": "T_threshold" + }, + { + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "name": "score_threshold", + "typeAttr": "T_threshold" + } + ], + "outputs": [ + { + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "name": "selected_indices", + "type": 3 + }, + { + "description": "A 0-D integer tensor representing the number of valid elements in\n`selected_indices`, with the valid elements appearing first.", + "name": "valid_outputs", + "type": 3 + } + ], + "summary": "Greedily selects a subset of bounding boxes in descending order of score," + } + }, + { + "name": "NonMaxSuppressionV5", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `float32`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the output `selected_indices` is padded to be of length\n`max_output_size`. 
Defaults to false.", + "name": "pad_to_max_output_size", + "type": "boolean" + } + ], + "description": "pruning away boxes that have high intersection-over-union (IOU) overlap\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. Bounding boxes are supplied as\n[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any\ndiagonal pair of box corners and the coordinates can be provided as normalized\n(i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm\nis agnostic to where the origin is in the coordinate system and more\ngenerally is invariant to orthogonal transformations and translations\nof the coordinate system; thus translating or reflections of the coordinate\nsystem result in the same boxes being selected by the algorithm.\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. 
For example:\n selected_indices = tf.image.non_max_suppression_v2(\n boxes, scores, max_output_size, iou_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)\nThis op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.\nBodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score\nof other overlapping boxes instead of directly causing them to be pruned.\nTo enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be\nlarger than 0.", + "inputs": [ + { + "description": "A 2-D float tensor of shape `[num_boxes, 4]`.", + "name": "boxes", + "typeAttr": "T" + }, + { + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "name": "scores", + "typeAttr": "T" + }, + { + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "name": "max_output_size", + "type": 3 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too much with respect to IOU.", + "name": "iou_threshold", + "typeAttr": "T" + }, + { + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "name": "score_threshold", + "typeAttr": "T" + }, + { + "description": "A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et\nal (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which\nis default), we fall back to standard (hard) NMS.", + "name": "soft_nms_sigma", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "name": "selected_indices", + "type": 3 + }, + { + "description": "A 1-D float tensor of shape `[M]` representing the corresponding\nscores for each selected box, where `M <= max_output_size`. 
Scores only differ\nfrom corresponding input scores when using Soft NMS (i.e. when\n`soft_nms_sigma>0`)", + "name": "selected_scores", + "typeAttr": "T" + }, + { + "description": "A 0-D integer tensor representing the number of valid elements in\n`selected_indices`, with the valid elements appearing first.", + "name": "valid_outputs", + "type": 3 + } + ], + "summary": "Greedily selects a subset of bounding boxes in descending order of score," + } + }, + { + "name": "NonMaxSuppressionWithOverlaps", + "schema": { + "description": "pruning away boxes that have high overlaps\nwith previously selected boxes. Bounding boxes with score less than\n`score_threshold` are removed. N-by-n overlap values are supplied as square matrix,\nwhich allows for defining a custom overlap criterium (eg. intersection over union,\nintersection over area, etc.).\n\nThe output of this operation is a set of integers indexing into the input\ncollection of bounding boxes representing the selected boxes. The bounding\nbox coordinates corresponding to the selected indices can then be obtained\nusing the `tf.gather operation`. 
For example:\n\n selected_indices = tf.image.non_max_suppression_with_overlaps(\n overlaps, scores, max_output_size, overlap_threshold, score_threshold)\n selected_boxes = tf.gather(boxes, selected_indices)", + "inputs": [ + { + "description": "A 2-D float tensor of shape `[num_boxes, num_boxes]` representing\nthe n-by-n box overlap values.", + "name": "overlaps", + "type": 1 + }, + { + "description": "A 1-D float tensor of shape `[num_boxes]` representing a single\nscore corresponding to each box (each row of boxes).", + "name": "scores", + "type": 1 + }, + { + "description": "A scalar integer tensor representing the maximum number of\nboxes to be selected by non max suppression.", + "name": "max_output_size", + "type": 3 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding whether\nboxes overlap too.", + "name": "overlap_threshold", + "type": 1 + }, + { + "description": "A 0-D float tensor representing the threshold for deciding when to remove\nboxes based on score.", + "name": "score_threshold", + "type": 1 + } + ], + "outputs": [ + { + "description": "A 1-D integer tensor of shape `[M]` representing the selected\nindices from the boxes tensor, where `M <= max_output_size`.", + "name": "selected_indices", + "type": 3 + } + ], + "summary": "Greedily selects a subset of bounding boxes in descending order of score," + } + }, + { + "name": "NonSerializableDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "NotEqual", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `uint16`, `uint32`, `uint64`, `complex64`, `quint8`, `qint8`, 
`qint32`, `string`, `bool`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": true, + "name": "incompatible_shape_error", + "type": "boolean" + } + ], + "description": "*NOTE*: `NotEqual` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "type": 10 + } + ], + "summary": "Returns the truth value of (x != y) element-wise." + } + }, + { + "name": "NthElement", + "schema": { + "attributes": [ + { + "default": false, + "description": "When set to True, find the nth-largest value in the vector and vice\nversa.", + "name": "reverse", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "If the input is a vector (rank-1), finds the entries which is the nth-smallest\nvalue in the vector and outputs their values as scalar tensor.\n\nFor matrices (resp. higher rank input), computes the entries which is the\nnth-smallest value in each row (resp. vector along the last dimension). Thus,\n\n values.shape = input.shape[:-1]", + "inputs": [ + { + "description": "1-D or higher with last dimension at least `n+1`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "0-D. Position of sorted vector to select along the last dimension (along\neach row for matrices). Valid range of n is `[0, input.shape[:-1])`", + "name": "n", + "type": 3 + } + ], + "outputs": [ + { + "description": "The `n`-th order statistic along each last dimensional slice.", + "name": "values", + "typeAttr": "T" + } + ], + "summary": "Finds values of the `n`-th order statistic for the last dimension." 
+ } + }, + { + "name": "OneHot", + "schema": { + "attributes": [ + { + "default": -1, + "description": "The axis to fill (default: -1, a new inner-most axis).", + "name": "axis", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `uint8`, `int32`, `int64`.", + "name": "TI", + "type": "type" + } + ], + "description": "The locations represented by indices in `indices` take value `on_value`,\nwhile all other locations take value `off_value`.\n\nIf the input `indices` is rank `N`, the output will have rank `N+1`,\nThe new axis is created at dimension `axis` (default: the new axis is\nappended at the end).\n\nIf `indices` is a scalar the output shape will be a vector of length `depth`.\n\nIf `indices` is a vector of length `features`, the output shape will be:\n```\n features x depth if axis == -1\n depth x features if axis == 0\n```\n\nIf `indices` is a matrix (batch) with shape `[batch, features]`,\nthe output shape will be:\n```\n batch x features x depth if axis == -1\n batch x depth x features if axis == 1\n depth x batch x features if axis == 0\n```\n\n\nExamples\n=========\n\nSuppose that\n```\n indices = [0, 2, -1, 1]\n depth = 3\n on_value = 5.0\n off_value = 0.0\n axis = -1\n```\n\nThen output is `[4 x 3]`:\n```\noutput =\n [5.0 0.0 0.0] // one_hot(0)\n [0.0 0.0 5.0] // one_hot(2)\n [0.0 0.0 0.0] // one_hot(-1)\n [0.0 5.0 0.0] // one_hot(1)\n```\n\nSuppose that\n```\n indices = [0, 2, -1, 1]\n depth = 3\n on_value = 0.0\n off_value = 3.0\n axis = 0\n```\n\nThen output is `[3 x 4]`:\n```\noutput =\n [0.0 3.0 3.0 3.0]\n [3.0 3.0 3.0 0.0]\n [3.0 3.0 3.0 3.0]\n [3.0 0.0 3.0 3.0]\n// ^ one_hot(0)\n// ^ one_hot(2)\n// ^ one_hot(-1)\n// ^ one_hot(1)\n```\n\nSuppose that\n```\n indices = [[0, 2], [1, -1]]\n depth = 3\n on_value = 1.0\n off_value = 0.0\n axis = -1\n```\n\nThen output is `[2 x 2 x 3]`:\n```\noutput =\n [\n [1.0, 0.0, 0.0] // one_hot(0)\n 
[0.0, 0.0, 1.0] // one_hot(2)\n ][\n [0.0, 1.0, 0.0] // one_hot(1)\n [0.0, 0.0, 0.0] // one_hot(-1)\n ]\n```", + "inputs": [ + { + "description": "A tensor of indices.", + "name": "indices", + "typeAttr": "TI" + }, + { + "description": "A scalar defining the depth of the one hot dimension.", + "name": "depth", + "type": 3 + }, + { + "description": "A scalar defining the value to fill in output when `indices[j] = i`.", + "name": "on_value", + "typeAttr": "T" + }, + { + "description": "A scalar defining the value to fill in output when `indices[j] != i`.", + "name": "off_value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The one-hot tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a one-hot tensor." + } + }, + { + "name": "OneShotIterator", + "schema": { + "attributes": [ + { + "description": "A function of type `() -> DT_VARIANT`, where the returned\nDT_VARIANT is a dataset.", + "name": "dataset_factory", + "type": "function" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "A one-shot iterator bundles the logic for defining the dataset and\nthe state of the iterator in a single op, which allows simple input\npipelines to be defined without an additional initialization\n(\"MakeIterator\") step.\n\nOne-shot iterators have the following limitations:\n\n* They do not support parameterization: all logic for creating the underlying\n dataset must be bundled in the `dataset_factory` function.\n* They are not resettable. 
Once a one-shot iterator reaches the end of its\n underlying dataset, subsequent \"IteratorGetNext\" operations on that\n iterator will always produce an `OutOfRange` error.\n\nFor greater flexibility, use \"Iterator\" and \"MakeIterator\" to define\nan iterator using an arbitrary subgraph, which may capture tensors\n(including fed values) as parameters, and which may be reset multiple\ntimes by rerunning \"MakeIterator\".", + "outputs": [ + { + "description": "A handle to the iterator that can be passed to an \"IteratorGetNext\"\nop.", + "name": "handle", + "type": 20 + } + ], + "summary": "Makes a \"one-shot\" iterator that can be iterated only once." + } + }, + { + "name": "OnesLike", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "a tensor of type T.", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "a tensor of the same shape and type as x but filled with ones.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns a tensor of ones with the same shape and type as x." 
+ } + }, + { + "name": "OptimizeDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": [], + "name": "optimization_configs", + "type": "string[]" + } + ], + "description": "Creates a dataset by applying optimizations to `input_dataset`.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A `tf.string` vector `tf.Tensor` identifying optimizations to use.", + "name": "optimizations", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset by applying optimizations to `input_dataset`." + } + }, + { + "name": "OptionalFromValue", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Toutput_types", + "type": "type[]" + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "optional", + "type": 21 + } + ], + "summary": "Constructs an Optional variant from a tuple of tensors." + } + }, + { + "name": "OptionalGetValue", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "optional", + "type": 21 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ], + "summary": "Returns the value stored in an Optional variant or raises an error if none exists." + } + }, + { + "name": "OptionalHasValue", + "schema": { + "inputs": [ + { + "name": "optional", + "type": 21 + } + ], + "outputs": [ + { + "name": "has_value", + "type": 10 + } + ], + "summary": "Returns true if and only if the given Optional variant has a value." 
+ } + }, + { + "name": "OptionalNone", + "schema": { + "outputs": [ + { + "name": "optional", + "type": 21 + } + ], + "summary": "Creates an Optional variant with no value." + } + }, + { + "name": "OrderedMapClear", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "summary": "Op removes all elements in the underlying container." + } + }, + { + "name": "OrderedMapIncompleteSize", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ], + "summary": "Op returns the number of incomplete elements in the underlying container." + } + }, + { + "name": "OrderedMapPeek", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "underlying container does not contain this key\nthis op will block until it does. 
This Op is optimized for\nperformance.", + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op peeks at the values at the specified key. If the" + } + }, + { + "name": "OrderedMapSize", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ], + "summary": "Op returns the number of elements in the underlying container." + } + }, + { + "name": "OrderedMapStage", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Maximum number of elements in the Staging Area. If > 0, inserts\non the container will block when the capacity is reached.", + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "minimum": 1, + "name": "fake_dtypes", + "type": "type[]" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "It is necessary to match this name to the matching Unstage Op.", + "name": "shared_name", + "type": "string" + } + ], + "description": "associative container. 
Elements are ordered by key.", + "inputs": [ + { + "description": "int64", + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + }, + { + "description": "a list of tensors\ndtypes A list of data types that inserted values should adhere to.", + "name": "values", + "typeListAttr": "fake_dtypes" + } + ], + "summary": "Stage (key, values) in the underlying container which behaves like a ordered" + } + }, + { + "name": "OrderedMapUnstage", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "from the underlying container. If the underlying container\ndoes not contain this key, the op will block until it does.", + "inputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op removes and returns the values associated with the key" + } + }, + { + "name": "OrderedMapUnstageNoKey", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "key from the underlying container. 
If the underlying container\ndoes not contain elements, the op will block until it does.", + "inputs": [ + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "key", + "type": 9 + }, + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op removes and returns the (key, value) element with the smallest" + } + }, + { + "name": "OutfeedDequeue", + "schema": { + "attributes": [ + { + "description": "The type of elements in the tensor.", + "name": "dtype", + "type": "type" + }, + { + "description": "The shape of the tensor.", + "name": "shape", + "type": "shape" + }, + { + "default": -1, + "description": "The TPU device to use. This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "name": "device_ordinal", + "type": "int64" + } + ], + "description": "This operation will block indefinitely until data is available.", + "outputs": [ + { + "description": "A tensor that will be read from the device outfeed.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Retrieves a single tensor from the computation outfeed." + } + }, + { + "name": "OutfeedDequeueTuple", + "schema": { + "attributes": [ + { + "description": "The element types of each element in `outputs`.", + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "description": "The shapes of each tensor in `outputs`.", + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The TPU device to use. This should be -1 when the Op\nis running on a TPU device, and >= 0 when the Op is running on the CPU\ndevice.", + "name": "device_ordinal", + "type": "int64" + } + ], + "description": "This operation will block indefinitely until data is available. 
Output `i`\ncorresponds to XLA tuple element `i`.", + "outputs": [ + { + "description": "A list of tensors that will be read from the outfeed.", + "name": "outputs", + "typeListAttr": "dtypes" + } + ], + "summary": "Retrieve multiple values from the computation outfeed." + } + }, + { + "name": "OutfeedEnqueue", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "description": "A tensor that will be inserted into the outfeed queue.", + "name": "input", + "typeAttr": "dtype" + } + ], + "summary": "Enqueue a Tensor on the computation outfeed." + } + }, + { + "name": "OutfeedEnqueueTuple", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + } + ], + "inputs": [ + { + "description": "A list of tensors that will be inserted into the outfeed queue as an\nXLA tuple.", + "name": "inputs", + "typeListAttr": "dtypes" + } + ], + "summary": "Enqueue multiple Tensor values on the computation outfeed." + } + }, + { + "name": "Pack", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": 0, + "description": "Dimension along which to pack. 
Negative values wrap around, so the\nvalid range is `[-(R+1), R+1)`.", + "name": "axis", + "type": "int64" + } + ], + "description": "Packs the `N` tensors in `values` into a tensor with rank one higher than each\ntensor in `values`, by packing them along the `axis` dimension.\nGiven a list of tensors of shape `(A, B, C)`;\n\nif `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.\nif `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.\nEtc.\n\nFor example:\n\n```\n# 'x' is [1, 4]\n# 'y' is [2, 5]\n# 'z' is [3, 6]\npack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\npack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]\n```\n\nThis is the opposite of `unpack`.", + "inputs": [ + { + "description": "Must be of same shape and type.", + "name": "values", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The packed tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor." + } + }, + { + "name": "Pad", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tpaddings", + "type": "type" + } + ], + "category": "Tensor", + "description": "This operation pads a `input` with zeros according to the `paddings` you\nspecify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the\nrank of `input`. 
For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many zeros to add before the contents of `input` in that dimension, and\n`paddings[D, 1]` indicates how many zeros to add after the contents of `input`\nin that dimension.\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 1], [2, 2]]\n# 'paddings' is [[1, 1], [2, 2]]\n# rank of 't' is 2\npad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\n [0, 0, 1, 1, 0, 0]\n [0, 0, 2, 2, 0, 0]\n [0, 0, 0, 0, 0, 0]]\n```\n", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "paddings", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Pads a tensor with zeros." + } + }, + { + "name": "PadV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tpaddings", + "type": "type" + } + ], + "description": "This operation pads `input` according to the `paddings` and `constant_values`\nyou specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is\nthe rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates\nhow many padding values to add before the contents of `input` in that dimension,\nand `paddings[D, 1]` indicates how many padding values to add after the contents\nof `input` in that dimension. 
`constant_values` is a scalar tensor of the same\ntype as `input` that indicates the value to use for padding `input`.\n\nThe padded size of each dimension D of the output is:\n\n`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`\n\nFor example:\n\n```\n# 't' is [[1, 1], [2, 2]]\n# 'paddings' is [[1, 1], [2, 2]]\n# 'constant_values' is 0\n# rank of 't' is 2\npad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]\n [0, 0, 1, 1, 0, 0]\n [0, 0, 2, 2, 0, 0]\n [0, 0, 0, 0, 0, 0]]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "paddings", + "typeAttr": "Tpaddings" + }, + { + "name": "constant_values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Pads a tensor." + } + }, + { + "name": "PaddedBatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Toutput_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "name": "batch_size", + "type": 9 + }, + { + "description": "A list of int64 tensors representing the desired padded shapes\nof the corresponding output components. These shapes may be partially\nspecified, using `-1` to indicate that a particular dimension should be\npadded to the maximum size of all batch elements.", + "name": "padded_shapes", + "numberAttr": "N", + "type": 9 + }, + { + "description": "A list of scalars containing the padding value to use for\neach of the outputs.", + "name": "padding_values", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that batches and pads `batch_size` elements from the input." 
+ } + }, + { + "name": "PaddedBatchDatasetV2", + "schema": { + "attributes": [ + { + "default": false, + "name": "parallel_copy", + "type": "boolean" + }, + { + "minimum": 1, + "name": "Toutput_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements to accumulate in a\nbatch.", + "name": "batch_size", + "type": 9 + }, + { + "description": "A list of int64 tensors representing the desired padded shapes\nof the corresponding output components. These shapes may be partially\nspecified, using `-1` to indicate that a particular dimension should be\npadded to the maximum size of all batch elements.", + "name": "padded_shapes", + "numberAttr": "N", + "type": 9 + }, + { + "description": "A list of scalars containing the padding value to use for\neach of the outputs.", + "name": "padding_values", + "typeListAttr": "Toutput_types" + }, + { + "description": "A scalar representing whether the last batch should be dropped in case its size\nis smaller than desired.", + "name": "drop_remainder", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that batches and pads `batch_size` elements from the input." + } + }, + { + "name": "PaddingFIFOQueue", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": [], + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. 
In this case, the inputs' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "description": "Variable-size shapes are allowed by setting the corresponding shape dimensions\nto 0 in the shape attr. In this case DequeueMany will pad up to the maximum\nsize of any given element in the minibatch. See below for details.", + "outputs": [ + { + "description": "The handle to the queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "A queue that produces elements in first-in first-out order." + } + }, + { + "name": "PaddingFIFOQueueV2", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": [], + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types.\nShapes of fixed rank but variable size are allowed by setting\nany shape dimension to -1. 
In this case, the inputs' shape may vary along\nthe given dimension, and DequeueMany will pad the given dimension with\nzeros up to the maximum shape of all elements in the given batch.\nIf the length of this attr is 0, different queue elements may have\ndifferent ranks and shapes, but only one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "description": "Variable-size shapes are allowed by setting the corresponding shape dimensions\nto 0 in the shape attr. In this case DequeueMany will pad up to the maximum\nsize of any given element in the minibatch. See below for details.", + "outputs": [ + { + "description": "The handle to the queue.", + "name": "handle", + "type": 20 + } + ], + "summary": "A queue that produces elements in first-in first-out order." 
+ } + }, + { + "name": "ParallelConcat", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "description": "the final shape of the result; should be equal to the shapes of any input\nbut with the number of input values in the first dimension.", + "name": "shape", + "type": "shape" + } + ], + "description": "The input tensors are all required to have size 1 in the first dimension.\n\nFor example:\n\n```\n# 'x' is [[1, 4]]\n# 'y' is [[2, 5]]\n# 'z' is [[3, 6]]\nparallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.\n```\n\nThe difference between concat and parallel_concat is that concat requires all\nof the inputs be computed before the operation will begin but doesn't require\nthat the input shapes be known during graph construction. Parallel concat\nwill copy pieces of the input into the output as they become available, in\nsome situations this can provide a performance benefit.", + "inputs": [ + { + "description": "Tensors to be concatenated. All must have size 1 in the first dimension\nand same shape.", + "name": "values", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The concatenated tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Concatenates a list of `N` tensors along the first dimension." + } + }, + { + "name": "ParallelDynamicStitch", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "Builds a merged tensor such that\n\n```python\n merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]\n```\n\nFor example, if each `indices[m]` is scalar or vector, we have\n\n```python\n # Scalar indices:\n merged[indices[m], ...] = data[m][...]\n\n # Vector indices:\n merged[indices[m][i], ...] 
= data[m][i, ...]\n```\n\nEach `data[i].shape` must start with the corresponding `indices[i].shape`,\nand the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we\nmust have `data[i].shape = indices[i].shape + constant`. In terms of this\n`constant`, the output shape is\n\n merged.shape = [max(indices)] + constant\n\nValues may be merged in parallel, so if an index appears in both `indices[m][i]`\nand `indices[n][j]`, the result may be invalid. This differs from the normal\nDynamicStitch operator that defines the behavior in that case.\n\nFor example:\n\n```python\n indices[0] = 6\n indices[1] = [4, 1]\n indices[2] = [[5, 2], [0, 3]]\n data[0] = [61, 62]\n data[1] = [[41, 42], [11, 12]]\n data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]\n merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],\n [51, 52], [61, 62]]\n```\n\nThis method can be used to merge partitions created by `dynamic_partition`\nas illustrated on the following example:\n\n```python\n # Apply function (increments x_i) on elements for which a certain condition\n # apply (x_i != -1 in this example).\n x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])\n condition_mask=tf.not_equal(x,tf.constant(-1.))\n partitioned_data = tf.dynamic_partition(\n x, tf.cast(condition_mask, tf.int32) , 2)\n partitioned_data[1] = partitioned_data[1] + 1.0\n condition_indices = tf.dynamic_partition(\n tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)\n x = tf.dynamic_stitch(condition_indices, partitioned_data)\n # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain\n # unchanged.\n```\n\n
    \n\n
    ", + "inputs": [ + { + "name": "indices", + "numberAttr": "N", + "type": 3 + }, + { + "name": "data", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "merged", + "typeAttr": "T" + } + ], + "summary": "Interleave the values from the `data` tensors into a single tensor." + } + }, + { + "name": "ParallelInterleaveDataset", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "description": "Types of the elements of `other_arguments`.", + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The resulting dataset is similar to the `InterleaveDataset`, with the exception\nthat if retrieving the next value from a dataset would cause the requester to\nblock, it will skip that input dataset. This dataset is especially useful\nwhen loading data from a variable-latency datastores (e.g. HDFS, GCS), as it\nallows the training step to proceed so long as some data is available.\n\n!! WARNING !! If the `sloppy` parameter is set to `True`, the operation of this\ndataset will not be deterministic!\n\nThis dataset has been superseded by `ParallelInterleaveDatasetV2`. New code\nshould use `ParallelInterleaveDatasetV2`.\n\nThe Python API `tf.data.experimental.parallel_interleave` creates instances of\nthis op. 
`tf.data.experimental.parallel_interleave` is a deprecated API.", + "inputs": [ + { + "description": "Dataset that produces a stream of arguments for the function `f`.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDataset` will cycle in a\nround-robin fashion.", + "name": "cycle_length", + "type": 9 + }, + { + "description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "name": "block_length", + "type": 9 + }, + { + "description": "If `True`, return elements as they become available, even if that means returning\nthese elements in a non-deterministic order. Sloppy operation may result in better\nperformance in the presence of stragglers, but the dataset will still block if\nall of its open streams are blocked.\nIf `False`, always return elements in a deterministic order.", + "name": "sloppy", + "type": 10 + }, + { + "description": "The number of elements each iterator being interleaved should buffer (similar\nto the `.prefetch()` transformation for each interleaved iterator).", + "name": "buffer_output_elements", + "type": 9 + }, + { + "description": "Determines the number of iterators to prefetch, allowing buffers to warm up and\ndata to be pre-fetched without blocking the main thread.", + "name": "prefetch_input_elements", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." 
+ } + }, + { + "name": "ParallelInterleaveDatasetV2", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "description": "Types of the elements of `other_arguments`.", + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": false, + "name": "sloppy", + "type": "boolean" + } + ], + "description": "The resulting dataset is similar to the `InterleaveDataset`, except that the\ndataset will fetch records from the interleaved datasets in parallel.\n\nThe `tf.data` Python API creates instances of this op from\n`Dataset.interleave()` when the `num_parallel_calls` parameter of that method\nis set to any value other than `None`.\n\nBy default, the output of this dataset will be deterministic, which may result\nin the dataset blocking if the next data item to be returned isn't available.\nIn order to avoid head-of-line blocking, one can set the\n`experimental_deterministic` parameter of `tf.data.Options` to `False`,\nwhich can improve performance at the expense of non-determinism.", + "inputs": [ + { + "description": "Dataset that produces a stream of arguments for the function `f`.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a\nround-robin fashion.", + "name": "cycle_length", + "type": 9 + }, + { + 
"description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "name": "block_length", + "type": 9 + }, + { + "description": "Determines the number of threads that should be used for fetching data from\ninput datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`\nconstant can be used to indicate that the level of parallelism should be autotuned.", + "name": "num_parallel_calls", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "ParallelInterleaveDatasetV3", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "default": "default", + "description": "A string indicating the op-level determinism to use. Deterministic controls\nwhether the interleave is allowed to return elements out of order if the next\nelement to be returned isn't available, but a later element is. Options are\n\"true\", \"false\", and \"default\". 
\"default\" indicates that determinism should be\ndecided by the `experimental_deterministic` parameter of `tf.data.Options`.", + "name": "deterministic", + "type": "string" + }, + { + "description": "Types of the elements of `other_arguments`.", + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The resulting dataset is similar to the `InterleaveDataset`, except that the\ndataset will fetch records from the interleaved datasets in parallel.\n\nThe `tf.data` Python API creates instances of this op from\n`Dataset.interleave()` when the `num_parallel_calls` parameter of that method\nis set to any value other than `None`.\n\nBy default, the output of this dataset will be deterministic, which may result\nin the dataset blocking if the next data item to be returned isn't available.\nIn order to avoid head-of-line blocking, one can either set the `deterministic`\nattribute to \"false\", or leave it as \"default\" and set the\n`experimental_deterministic` parameter of `tf.data.Options` to `False`.\nThis can improve performance at the expense of non-determinism.", + "inputs": [ + { + "description": "Dataset that produces a stream of arguments for the function `f`.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a\nround-robin fashion.", + "name": "cycle_length", + "type": 9 + }, + { + "description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "name": 
"block_length", + "type": 9 + }, + { + "description": "Determines the number of threads that should be used for fetching data from\ninput datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`\nconstant can be used to indicate that the level of parallelism should be autotuned.", + "name": "num_parallel_calls", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "ParallelInterleaveDatasetV4", + "schema": { + "attributes": [ + { + "description": "A function mapping elements of `input_dataset`, concatenated with\n`other_arguments`, to a Dataset variant that contains elements matching\n`output_types` and `output_shapes`.", + "name": "f", + "type": "function" + }, + { + "default": "default", + "description": "A string indicating the op-level determinism to use. Deterministic controls\nwhether the interleave is allowed to return elements out of order if the next\nelement to be returned isn't available, but a later element is. Options are\n\"true\", \"false\", and \"default\". 
\"default\" indicates that determinism should be\ndecided by the `experimental_deterministic` parameter of `tf.data.Options`.", + "name": "deterministic", + "type": "string" + }, + { + "description": "Types of the elements of `other_arguments`.", + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The resulting dataset is similar to the `InterleaveDataset`, except that the\ndataset will fetch records from the interleaved datasets in parallel.\n\nThe `tf.data` Python API creates instances of this op from\n`Dataset.interleave()` when the `num_parallel_calls` parameter of that method\nis set to any value other than `None`.\n\nBy default, the output of this dataset will be deterministic, which may result\nin the dataset blocking if the next data item to be returned isn't available.\nIn order to avoid head-of-line blocking, one can either set the `deterministic`\nattribute to \"false\", or leave it as \"default\" and set the\n`experimental_deterministic` parameter of `tf.data.Options` to `False`.\nThis can improve performance at the expense of non-determinism.", + "inputs": [ + { + "description": "Dataset that produces a stream of arguments for the function `f`.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "Additional arguments to pass to `f` beyond those produced by `input_dataset`.\nEvaluated once when the dataset is instantiated.", + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "Number of datasets (each created by applying `f` to the elements of\n`input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a\nround-robin fashion.", + "name": "cycle_length", + "type": 9 + }, + { + "description": "Number of elements at a time to produce from each interleaved invocation of a\ndataset returned by `f`.", + "name": 
"block_length", + "type": 9 + }, + { + "description": "The number of elements each iterator being interleaved should buffer (similar\nto the `.prefetch()` transformation for each interleaved iterator).", + "name": "buffer_output_elements", + "type": 9 + }, + { + "description": "Determines the number of iterators to prefetch, allowing buffers to warm up and\ndata to be pre-fetched without blocking the main thread.", + "name": "prefetch_input_elements", + "type": 9 + }, + { + "description": "Determines the number of threads that should be used for fetching data from\ninput datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE`\nconstant can be used to indicate that the level of parallelism should be autotuned.", + "name": "num_parallel_calls", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "ParallelMapDataset", + "schema": { + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "use_inter_op_parallelism", + "type": "boolean" + }, + { + "default": false, + "name": "sloppy", + "type": "boolean" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + } + ], + "description": "Unlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `num_parallel_calls` copies of `f` in parallel.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "The number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "name": "num_parallel_calls", + "type": 3 + } + ], + "outputs": [ + { + "name": 
"handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "ParallelMapDatasetV2", + "schema": { + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "use_inter_op_parallelism", + "type": "boolean" + }, + { + "default": "default", + "name": "deterministic", + "type": "string" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + } + ], + "description": "Unlike a \"MapDataset\", which applies `f` sequentially, this dataset invokes up\nto `num_parallel_calls` copies of `f` in parallel.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + }, + { + "description": "The number of concurrent invocations of `f` that process\nelements from `input_dataset` in parallel.", + "name": "num_parallel_calls", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that applies `f` to the outputs of `input_dataset`." + } + }, + { + "name": "ParameterizedTruncatedNormal", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "The type of the output. 
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "scalar which applies to the entire output, or a vector of length shape[0] which\nstores the parameters for each batch.", + "inputs": [ + { + "description": "The shape of the output tensor. Batches are indexed by the 0th dimension.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "The mean parameter of each batch.", + "name": "means", + "typeAttr": "dtype" + }, + { + "description": "The standard deviation parameter of each batch. Must be greater than 0.", + "name": "stdevs", + "typeAttr": "dtype" + }, + { + "description": "The minimum cutoff. May be -infinity.", + "name": "minvals", + "typeAttr": "dtype" + }, + { + "description": "The maximum cutoff. May be +infinity, and must be more than the minval\nfor each batch.", + "name": "maxvals", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "description": "A matrix of shape num_batches x samples_per_batch, filled with random\ntruncated normal values using the parameters for each row.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a normal distribution. The parameters may each be a" + } + }, + { + "name": "ParseExample", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "Nsparse", + "type": "int64" + }, + { + "minimum": 0, + "name": "Ndense", + "type": "int64" + }, + { + "description": "A list of Nsparse types; the data types of data in each Feature\ngiven in sparse_keys.\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). 
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tdense", + "type": "type[]" + }, + { + "description": "A list of Ndense shapes; the shapes of data in each Feature\ngiven in dense_keys.\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch.\nThis works for dense_shapes[j] = (-1, D1, ..., DN). In this case\nthe shape of the output Tensor dense_values[j] will be\n(|serialized|, M, D1, .., DN), where M is the maximum number of blocks\nof elements of length D1 * .... * DN, across all minibatch entries\nin the input. Any minibatch entry with less than M blocks of elements of\nlength D1 * ... * DN will be padded with the corresponding default_value\nscalar element along the second dimension.", + "minimum": 0, + "name": "dense_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A vector containing a batch of binary serialized Example protos.", + "name": "serialized", + "type": 7 + }, + { + "description": "A vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) names for the\ncorresponding serialized protos. 
These are purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no names are available.\nIf non-empty, this vector must be the same length as \"serialized\".", + "name": "names", + "type": 7 + }, + { + "description": "A list of Nsparse string Tensors (scalars).\nThe keys expected in the Examples' features associated with sparse values.", + "name": "sparse_keys", + "numberAttr": "Nsparse", + "type": 7 + }, + { + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples' features associated with dense values.", + "name": "dense_keys", + "numberAttr": "Ndense", + "type": 7 + }, + { + "description": "A list of Ndense Tensors (some may be empty).\ndense_defaults[j] provides default values\nwhen the example's feature_map lacks dense_key[j]. If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it's empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element.", + "name": "dense_defaults", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "numberAttr": "Nsparse", + "type": 9 + }, + { + "name": "sparse_values", + "typeListAttr": "sparse_types" + }, + { + "name": "sparse_shapes", + "numberAttr": "Nsparse", + "type": 9 + }, + { + "name": "dense_values", + "typeListAttr": "Tdense" + } + ], + "summary": "Transforms a vector of brain.Example protos (as strings) into typed tensors." 
+ } + }, + { + "name": "ParseExampleDataset", + "schema": { + "attributes": [ + { + "description": "A list of string keys in the examples features.\nThe results for these keys will be returned as `SparseTensor` objects.", + "minimum": 0, + "name": "sparse_keys", + "type": "string[]" + }, + { + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples features associated with dense values.", + "minimum": 0, + "name": "dense_keys", + "type": "string[]" + }, + { + "description": "A list of `DTypes` of the same length as `sparse_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "A list of DTypes of the same length as `dense_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported.\n Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tdense", + "type": "type[]" + }, + { + "description": "List of tuples with the same length as `dense_keys`.\nThe shape of the data for each dense feature referenced by `dense_keys`.\nRequired for any input tensors identified by `dense_keys`. Must be\neither fully defined, or may contain an unknown first dimension.\nAn unknown first dimension means the feature is treated as having\na variable number of blocks, and the output shape along this dimension\nis considered unknown at graph build time. 
Padding is applied for\nminibatch elements smaller than the maximum number of blocks for the\ngiven feature along this dimension.", + "minimum": 0, + "name": "dense_shapes", + "type": "shape[]" + }, + { + "description": "The type list for the return values.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "description": "The list of shapes being produced.", + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": false, + "name": "sloppy", + "type": "boolean" + }, + { + "default": [], + "minimum": 0, + "name": "ragged_keys", + "type": "string[]" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "ragged_value_types", + "type": "type[]" + }, + { + "default": [], + "description": "Must be one of the following: `int32`, `int64`.", + "minimum": 0, + "name": "ragged_split_types", + "type": "type[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_parallel_calls", + "type": 9 + }, + { + "description": "A dict mapping string keys to `Tensor`s.\nThe keys of the dict must match the dense_keys of the feature.", + "name": "dense_defaults", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features." 
+ } + }, + { + "name": "ParseExampleDatasetV2", + "schema": { + "attributes": [ + { + "description": "A list of string keys in the examples features.\nThe results for these keys will be returned as `SparseTensor` objects.", + "minimum": 0, + "name": "sparse_keys", + "type": "string[]" + }, + { + "description": "A list of Ndense string Tensors (scalars).\nThe keys expected in the Examples features associated with dense values.", + "minimum": 0, + "name": "dense_keys", + "type": "string[]" + }, + { + "description": "A list of `DTypes` of the same length as `sparse_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "A list of DTypes of the same length as `dense_keys`.\nOnly `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),\nand `tf.string` (`BytesList`) are supported.\n Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tdense", + "type": "type[]" + }, + { + "description": "List of tuples with the same length as `dense_keys`.\nThe shape of the data for each dense feature referenced by `dense_keys`.\nRequired for any input tensors identified by `dense_keys`. Must be\neither fully defined, or may contain an unknown first dimension.\nAn unknown first dimension means the feature is treated as having\na variable number of blocks, and the output shape along this dimension\nis considered unknown at graph build time. 
Padding is applied for\nminibatch elements smaller than the maximum number of blocks for the\ngiven feature along this dimension.", + "minimum": 0, + "name": "dense_shapes", + "type": "shape[]" + }, + { + "description": "The type list for the return values.", + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "description": "The list of shapes being produced.", + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": "default", + "description": "A string indicating the op-level determinism to use. Deterministic controls\nwhether the dataset is allowed to return elements out of order if the next\nelement to be returned isn't available, but a later element is. Options are\n\"true\", \"false\", and \"default\". \"default\" indicates that determinism should be\ndecided by the `experimental_deterministic` parameter of `tf.data.Options`.", + "name": "deterministic", + "type": "string" + }, + { + "default": [], + "minimum": 0, + "name": "ragged_keys", + "type": "string[]" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "ragged_value_types", + "type": "type[]" + }, + { + "default": [], + "description": "Must be one of the following: `int32`, `int64`.", + "minimum": 0, + "name": "ragged_split_types", + "type": "type[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "num_parallel_calls", + "type": 9 + }, + { + "description": "A dict mapping string keys to `Tensor`s.\nThe keys of the dict must match the dense_keys of the feature.", + "name": "dense_defaults", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features." 
+ } + }, + { + "name": "ParseExampleV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tdense", + "type": "type[]" + }, + { + "description": "The number of sparse keys.", + "minimum": 0, + "name": "num_sparse", + "type": "int64" + }, + { + "description": "A list of `num_sparse` types; the data types of data in each Feature\ngiven in sparse_keys.\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "A list of `num_ragged` types; the data types of data in each Feature\ngiven in ragged_keys (where `num_ragged = sparse_keys.size()`).\nCurrently the ParseExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "ragged_value_types", + "type": "type[]" + }, + { + "description": "A list of `num_ragged` types; the data types of row_splits in each Feature\ngiven in ragged_keys (where `num_ragged = sparse_keys.size()`).\nMay be DT_INT32 or DT_INT64. Must be one of the following: `int32`, `int64`.", + "minimum": 0, + "name": "ragged_split_types", + "type": "type[]" + }, + { + "description": "A list of `num_dense` shapes; the shapes of data in each Feature\ngiven in dense_keys (where `num_dense = dense_keys.size()`).\nThe number of elements in the Feature corresponding to dense_key[j]\nmust always equal dense_shapes[j].NumEntries().\nIf dense_shapes[j] == (D0, D1, ..., DN) then the shape of output\nTensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):\nThe dense outputs are just the inputs row-stacked by batch.\nThis works for dense_shapes[j] = (-1, D1, ..., DN). 
In this case\nthe shape of the output Tensor dense_values[j] will be\n(|serialized|, M, D1, .., DN), where M is the maximum number of blocks\nof elements of length D1 * .... * DN, across all minibatch entries\nin the input. Any minibatch entry with less than M blocks of elements of\nlength D1 * ... * DN will be padded with the corresponding default_value\nscalar element along the second dimension.", + "minimum": 0, + "name": "dense_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A scalar or vector containing binary serialized Example protos.", + "name": "serialized", + "type": 7 + }, + { + "description": "A tensor containing the names of the serialized protos.\nCorresponds 1:1 with the `serialized` tensor.\nMay contain, for example, table key (descriptive) names for the\ncorresponding serialized protos. These are purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no names are available.\nIf non-empty, this tensor must have the same shape as \"serialized\".", + "name": "names", + "type": 7 + }, + { + "description": "Vector of strings.\nThe keys expected in the Examples' features associated with sparse values.", + "name": "sparse_keys", + "type": 7 + }, + { + "description": "Vector of strings.\nThe keys expected in the Examples' features associated with dense values.", + "name": "dense_keys", + "type": 7 + }, + { + "description": "Vector of strings.\nThe keys expected in the Examples' features associated with ragged values.", + "name": "ragged_keys", + "type": 7 + }, + { + "description": "A list of Tensors (some may be empty). Corresponds 1:1 with `dense_keys`.\ndense_defaults[j] provides default values\nwhen the example's feature_map lacks dense_key[j]. 
If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it's empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element.", + "name": "dense_defaults", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "sparse_values", + "typeListAttr": "sparse_types" + }, + { + "name": "sparse_shapes", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "dense_values", + "typeListAttr": "Tdense" + }, + { + "name": "ragged_values", + "typeListAttr": "ragged_value_types" + }, + { + "name": "ragged_row_splits", + "typeListAttr": "ragged_split_types" + } + ], + "summary": "Transforms a vector of tf.Example protos (as strings) into typed tensors." + } + }, + { + "name": "ParseSequenceExample", + "schema": { + "attributes": [ + { + "description": "A vector listing the\nFeatureList keys which may be missing from the SequenceExamples. If the\nassociated FeatureList is missing, it is treated as empty. 
By default,\nany FeatureList not listed in this vector must exist in the SequenceExamples.", + "minimum": 0, + "name": "feature_list_dense_missing_assumed_empty", + "type": "string[]" + }, + { + "description": "A list of Ncontext_sparse string Tensors (scalars).\nThe keys expected in the Examples' features associated with context_sparse\nvalues.", + "minimum": 0, + "name": "context_sparse_keys", + "type": "string[]" + }, + { + "description": "A list of Ncontext_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' context features associated with\ndense values.", + "minimum": 0, + "name": "context_dense_keys", + "type": "string[]" + }, + { + "description": "A list of Nfeature_list_sparse string Tensors\n(scalars). The keys expected in the FeatureLists associated with sparse\nvalues.", + "minimum": 0, + "name": "feature_list_sparse_keys", + "type": "string[]" + }, + { + "description": "A list of Nfeature_list_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' feature_lists associated\nwith lists of dense values.", + "minimum": 0, + "name": "feature_list_dense_keys", + "type": "string[]" + }, + { + "default": 0, + "minimum": 0, + "name": "Ncontext_sparse", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "Ncontext_dense", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "Nfeature_list_sparse", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "Nfeature_list_dense", + "type": "int64" + }, + { + "default": [], + "description": "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). 
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "context_sparse_types", + "type": "type[]" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tcontext_dense", + "type": "type[]" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "feature_list_dense_types", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j].", + "minimum": 0, + "name": "context_dense_shapes", + "type": "shape[]" + }, + { + "default": [], + "description": "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). 
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "feature_list_sparse_types", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries().", + "minimum": 0, + "name": "feature_list_dense_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A vector containing binary serialized SequenceExample protos.", + "name": "serialized", + "type": 7 + }, + { + "description": "A vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no name is available.", + "name": "debug_name", + "type": 7 + }, + { + "description": "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample's context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it's\nempty. 
If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j].", + "name": "context_dense_defaults", + "typeListAttr": "Tcontext_dense" + } + ], + "outputs": [ + { + "name": "context_sparse_indices", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_sparse_values", + "typeListAttr": "context_sparse_types" + }, + { + "name": "context_sparse_shapes", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_dense_values", + "typeListAttr": "Tcontext_dense" + }, + { + "name": "feature_list_sparse_indices", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_sparse_values", + "typeListAttr": "feature_list_sparse_types" + }, + { + "name": "feature_list_sparse_shapes", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_dense_values", + "typeListAttr": "feature_list_dense_types" + }, + { + "name": "feature_list_dense_lengths", + "numberAttr": "Nfeature_list_dense", + "type": 9 + } + ], + "summary": "Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors." + } + }, + { + "name": "ParseSequenceExampleV2", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "Ncontext_sparse", + "type": "int64" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tcontext_dense", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "context_sparse_types", + "type": "type[]" + }, + { + "default": [], + "description": "RaggedTensor.value dtypes for the ragged context features. 
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "context_ragged_value_types", + "type": "type[]" + }, + { + "default": [], + "description": "RaggedTensor.row_split dtypes for the ragged context features. Must be one of the following: `int32`, `int64`.", + "minimum": 0, + "name": "context_ragged_split_types", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j].", + "minimum": 0, + "name": "context_dense_shapes", + "type": "shape[]" + }, + { + "default": 0, + "minimum": 0, + "name": "Nfeature_list_sparse", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "Nfeature_list_dense", + "type": "int64" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "feature_list_dense_types", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "feature_list_sparse_types", + "type": "type[]" + }, + { + "default": [], + "description": "RaggedTensor.value dtypes for the ragged FeatureList features. Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "feature_list_ragged_value_types", + "type": "type[]" + }, + { + "default": [], + "description": "RaggedTensor.row_split dtypes for the ragged FeatureList features. 
Must be one of the following: `int32`, `int64`.", + "minimum": 0, + "name": "feature_list_ragged_split_types", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries().", + "minimum": 0, + "name": "feature_list_dense_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A scalar or vector containing binary serialized SequenceExample protos.", + "name": "serialized", + "type": 7 + }, + { + "description": "A scalar or vector containing the names of the serialized protos.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty vector if no name is available.", + "name": "debug_name", + "type": 7 + }, + { + "description": "The keys expected in the Examples' features associated with context_sparse\nvalues.", + "name": "context_sparse_keys", + "type": 7 + }, + { + "description": "The keys expected in the SequenceExamples' context features associated with\ndense values.", + "name": "context_dense_keys", + "type": 7 + }, + { + "description": "The keys expected in the Examples' features associated with context_ragged\nvalues.", + "name": "context_ragged_keys", + "type": 7 + }, + { + "description": "The keys expected in the FeatureLists associated with sparse values.", + "name": "feature_list_sparse_keys", + "type": 7 + }, + { + "description": "The keys expected in the SequenceExamples' feature_lists associated\nwith lists of dense values.", + "name": "feature_list_dense_keys", + "type": 7 + }, + { + "description": "The keys expected in the FeatureLists associated with ragged values.", + "name": "feature_list_ragged_keys", 
+ "type": 7 + }, + { + "description": "A vector corresponding 1:1 with feature_list_dense_keys, indicating which\nfeatures may be missing from the SequenceExamples. If the associated\nFeatureList is missing, it is treated as empty.", + "name": "feature_list_dense_missing_assumed_empty", + "type": 10 + }, + { + "description": "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample's context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it's\nempty. If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j].", + "name": "context_dense_defaults", + "typeListAttr": "Tcontext_dense" + } + ], + "outputs": [ + { + "name": "context_sparse_indices", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_sparse_values", + "typeListAttr": "context_sparse_types" + }, + { + "name": "context_sparse_shapes", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_dense_values", + "typeListAttr": "Tcontext_dense" + }, + { + "name": "context_ragged_values", + "typeListAttr": "context_ragged_value_types" + }, + { + "name": "context_ragged_row_splits", + "typeListAttr": "context_ragged_split_types" + }, + { + "name": "feature_list_sparse_indices", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_sparse_values", + "typeListAttr": "feature_list_sparse_types" + }, + { + "name": "feature_list_sparse_shapes", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_dense_values", + "typeListAttr": "feature_list_dense_types" + }, + { + "name": "feature_list_dense_lengths", + "numberAttr": "Nfeature_list_dense", + "type": 9 + }, + { + "name": "feature_list_ragged_values", + "typeListAttr": 
"feature_list_ragged_value_types" + }, + { + "name": "feature_list_ragged_outer_splits", + "typeListAttr": "feature_list_ragged_split_types" + }, + { + "name": "feature_list_ragged_inner_splits", + "typeListAttr": "feature_list_ragged_split_types" + } + ], + "summary": "Transforms a vector of tf.io.SequenceExample protos (as strings) into\ntyped tensors." + } + }, + { + "name": "ParseSingleExample", + "schema": { + "attributes": [ + { + "description": "The number of sparse features to be parsed from the example. This\nmust match the lengths of `sparse_keys` and `sparse_types`.", + "minimum": 0, + "name": "num_sparse", + "type": "int64" + }, + { + "description": "A list of `num_sparse` strings.\nThe keys expected in the Examples' features associated with sparse values.", + "minimum": 0, + "name": "sparse_keys", + "type": "string[]" + }, + { + "description": "The keys expected in the Examples' features associated with dense\nvalues.", + "minimum": 0, + "name": "dense_keys", + "type": "string[]" + }, + { + "description": "A list of `num_sparse` types; the data types of data in each\nFeature given in sparse_keys.\nCurrently the ParseSingleExample op supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "The data types of data in each Feature given in dense_keys.\nThe length of this list must match the length of `dense_keys`.\nCurrently the ParseSingleExample op supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tdense", + "type": "type[]" + }, + { + "description": "The shapes of data in each Feature given in dense_keys.\nThe length of this list must match the length of `dense_keys`. 
The\nnumber of elements in the Feature corresponding to dense_key[j] must\nalways equal dense_shapes[j].NumEntries(). If dense_shapes[j] ==\n(D0, D1, ..., DN) then the shape of output Tensor dense_values[j]\nwill be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,\n..., DN), the shape of the output Tensor dense_values[j] will be (M,\nD1, .., DN), where M is the number of blocks of elements of length\nD1 * .... * DN, in the input.", + "minimum": 0, + "name": "dense_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A vector containing a batch of binary serialized Example protos.", + "name": "serialized", + "type": 7 + }, + { + "description": "A list of Tensors (some may be empty), whose length matches\nthe length of `dense_keys`. dense_defaults[j] provides default values\nwhen the example's feature_map lacks dense_key[j]. If an empty Tensor is\nprovided for dense_defaults[j], then the Feature dense_keys[j] is required.\nThe input type is inferred from dense_defaults[j], even when it's empty.\nIf dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,\nthen the shape of dense_defaults[j] must match that of dense_shapes[j].\nIf dense_shapes[j] has an undefined major dimension (variable strides dense\nfeature), dense_defaults[j] must contain a single element:\nthe padding element.", + "name": "dense_defaults", + "typeListAttr": "Tdense" + } + ], + "outputs": [ + { + "name": "sparse_indices", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "sparse_values", + "typeListAttr": "sparse_types" + }, + { + "name": "sparse_shapes", + "numberAttr": "num_sparse", + "type": 9 + }, + { + "name": "dense_values", + "typeListAttr": "Tdense" + } + ], + "summary": "Transforms a tf.Example proto (as a string) into typed tensors." 
+ } + }, + { + "name": "ParseSingleSequenceExample", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "Ncontext_sparse", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "Ncontext_dense", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "Nfeature_list_sparse", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "Nfeature_list_dense", + "type": "int64" + }, + { + "default": [], + "description": "A list of Ncontext_sparse types; the data types of data in\neach context Feature given in context_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "context_sparse_types", + "type": "type[]" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "Tcontext_dense", + "type": "type[]" + }, + { + "default": [], + "description": "Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "feature_list_dense_types", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Ncontext_dense shapes; the shapes of data in\neach context Feature given in context_dense_keys.\nThe number of elements in the Feature corresponding to context_dense_key[j]\nmust always equal context_dense_shapes[j].NumEntries().\nThe shape of context_dense_values[j] will match context_dense_shapes[j].", + "minimum": 0, + "name": "context_dense_shapes", + "type": "shape[]" + }, + { + "default": [], + "description": "A list of Nfeature_list_sparse types; the data types\nof data in each FeatureList given in feature_list_sparse_keys.\nCurrently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),\nDT_INT64 (Int64List), and DT_STRING (BytesList). 
Must be one of the following: `float32`, `int64`, `string`.", + "minimum": 0, + "name": "feature_list_sparse_types", + "type": "type[]" + }, + { + "default": [], + "description": "A list of Nfeature_list_dense shapes; the shapes of\ndata in each FeatureList given in feature_list_dense_keys.\nThe shape of each Feature in the FeatureList corresponding to\nfeature_list_dense_key[j] must always equal\nfeature_list_dense_shapes[j].NumEntries().", + "minimum": 0, + "name": "feature_list_dense_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "A scalar containing a binary serialized SequenceExample proto.", + "name": "serialized", + "type": 7 + }, + { + "description": "A vector listing the\nFeatureList keys which may be missing from the SequenceExample. If the\nassociated FeatureList is missing, it is treated as empty. By default,\nany FeatureList not listed in this vector must exist in the SequenceExample.", + "name": "feature_list_dense_missing_assumed_empty", + "type": 7 + }, + { + "description": "A list of Ncontext_sparse string Tensors (scalars).\nThe keys expected in the Examples' features associated with context_sparse\nvalues.", + "name": "context_sparse_keys", + "numberAttr": "Ncontext_sparse", + "type": 7 + }, + { + "description": "A list of Ncontext_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' context features associated with\ndense values.", + "name": "context_dense_keys", + "numberAttr": "Ncontext_dense", + "type": 7 + }, + { + "description": "A list of Nfeature_list_sparse string Tensors\n(scalars). 
The keys expected in the FeatureLists associated with sparse\nvalues.", + "name": "feature_list_sparse_keys", + "numberAttr": "Nfeature_list_sparse", + "type": 7 + }, + { + "description": "A list of Nfeature_list_dense string Tensors (scalars).\nThe keys expected in the SequenceExamples' feature_lists associated\nwith lists of dense values.", + "name": "feature_list_dense_keys", + "numberAttr": "Nfeature_list_dense", + "type": 7 + }, + { + "description": "A list of Ncontext_dense Tensors (some may be empty).\ncontext_dense_defaults[j] provides default values\nwhen the SequenceExample's context map lacks context_dense_key[j].\nIf an empty Tensor is provided for context_dense_defaults[j],\nthen the Feature context_dense_keys[j] is required.\nThe input type is inferred from context_dense_defaults[j], even when it's\nempty. If context_dense_defaults[j] is not empty, its shape must match\ncontext_dense_shapes[j].", + "name": "context_dense_defaults", + "typeListAttr": "Tcontext_dense" + }, + { + "description": "A scalar containing the name of the serialized proto.\nMay contain, for example, table key (descriptive) name for the\ncorresponding serialized proto. 
This is purely useful for debugging\npurposes, and the presence of values here has no effect on the output.\nMay also be an empty scalar if no name is available.", + "name": "debug_name", + "type": 7 + } + ], + "outputs": [ + { + "name": "context_sparse_indices", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_sparse_values", + "typeListAttr": "context_sparse_types" + }, + { + "name": "context_sparse_shapes", + "numberAttr": "Ncontext_sparse", + "type": 9 + }, + { + "name": "context_dense_values", + "typeListAttr": "Tcontext_dense" + }, + { + "name": "feature_list_sparse_indices", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_sparse_values", + "typeListAttr": "feature_list_sparse_types" + }, + { + "name": "feature_list_sparse_shapes", + "numberAttr": "Nfeature_list_sparse", + "type": 9 + }, + { + "name": "feature_list_dense_values", + "typeListAttr": "feature_list_dense_types" + } + ], + "summary": "Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors." + } + }, + { + "name": "ParseTensor", + "schema": { + "attributes": [ + { + "description": "The type of the serialized tensor. The provided type must match the\ntype of the serialized tensor and no implicit conversion will take place.", + "name": "out_type", + "type": "type" + } + ], + "inputs": [ + { + "description": "A scalar string containing a serialized TensorProto proto.", + "name": "serialized", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor of type `out_type`.", + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Transforms a serialized tensorflow.TensorProto proto into a Tensor." 
+ } + }, + { + "name": "PartitionedCall", + "schema": { + "attributes": [ + { + "description": "A list of input types.", + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "description": "A list of output types.", + "minimum": 0, + "name": "Tout", + "type": "type[]" + }, + { + "description": " A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op.", + "name": "f", + "type": "function" + }, + { + "default": "", + "name": "config", + "type": "string" + }, + { + "default": "", + "name": "config_proto", + "type": "string" + }, + { + "default": "", + "name": "executor_type", + "type": "string" + } + ], + "inputs": [ + { + "description": "A list of input tensors.", + "name": "args", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "A list of return values.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "returns `f(inputs)`, where `f`'s body is placed and partitioned." + } + }, + { + "name": "Placeholder", + "schema": { + "attributes": [ + { + "description": "The type of elements in the tensor.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "description": "(Optional) The shape of the tensor. If the shape has 0 dimensions, the\nshape is unconstrained.", + "name": "shape", + "type": "shape" + } + ], + "description": "N.B. This operation will fail with an error if it is executed. 
It is\nintended as a way to represent a value that will always be fed, and to\nprovide attrs that enable the fed value to be checked at runtime.", + "outputs": [ + { + "description": "A placeholder tensor that must be replaced using the feed mechanism.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "A placeholder op for a value that will be fed into the computation." + } + }, + { + "name": "PlaceholderV2", + "schema": { + "attributes": [ + { + "description": "The type of elements in the tensor.", + "name": "dtype", + "type": "type" + }, + { + "description": "The shape of the tensor. The shape can be any partially-specified\nshape. To be unconstrained, pass in a shape with unknown rank.", + "name": "shape", + "type": "shape" + } + ], + "description": "N.B. This operation will fail with an error if it is executed. It is\nintended as a way to represent a value that will always be fed, and to\nprovide attrs that enable the fed value to be checked at runtime.", + "outputs": [ + { + "description": "A placeholder tensor that must be replaced using the feed mechanism.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "A placeholder op for a value that will be fed into the computation." + } + }, + { + "name": "PlaceholderWithDefault", + "schema": { + "attributes": [ + { + "description": "The type of elements in the tensor.", + "name": "dtype", + "type": "type" + }, + { + "description": "The (possibly partial) shape of the tensor.", + "name": "shape", + "type": "shape" + } + ], + "inputs": [ + { + "description": "The default value to produce when `output` is not fed.", + "name": "input", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "description": "A placeholder tensor that defaults to `input` if it is not fed.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "A placeholder op that passes through `input` when its output is not fed." 
+ } + }, + { + "name": "Polygamma", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "The polygamma function is defined as:\n\n\n\\\\(\\psi^{(a)}(x) = \\frac{d^a}{dx^a} \\psi(x)\\\\)\n\nwhere \\\\(\\psi(x)\\\\) is the digamma function.\nThe polygamma function is defined only for non-negative integer orders \\\\a\\\\.", + "inputs": [ + { + "name": "a", + "typeAttr": "T" + }, + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Compute the polygamma function \\\\(\\psi^{(n)}(x)\\\\)." + } + }, + { + "name": "PopulationCount", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "For each entry in `x`, calculates the number of `1` (on) bits in the binary\nrepresentation of that entry.\n\n**NOTE**: It is more efficient to first `tf.bitcast` your tensors into\n`int32` or `int64` and perform the bitcount on the result, than to feed in\n8- or 16-bit inputs and then aggregate the resulting counts.", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "type": 4 + } + ], + "summary": "Computes element-wise population count (a.k.a. popcount, bitsum, bitcount)." + } + }, + { + "name": "Pow", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float32`, `float16`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for\ncorresponding elements in `x` and `y`. 
For example:\n\n```\n# tensor 'x' is [[2, 2]], [3, 3]]\n# tensor 'y' is [[8, 16], [2, 3]]\ntf.pow(x, y) ==> [[256, 65536], [9, 27]]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the power of one value to another." + } + }, + { + "name": "PrefetchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": 0, + "name": "slack_period", + "type": "int64" + }, + { + "default": true, + "name": "legacy_autotune", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "The maximum number of elements to buffer in an iterator over\nthis dataset.", + "name": "buffer_size", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that asynchronously prefetches elements from `input_dataset`." + } + }, + { + "name": "Prelinearize", + "schema": { + "attributes": [ + { + "description": "The type of elements in the tensor.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "description": "The shape of the tensor.", + "name": "shape", + "type": "shape" + }, + { + "default": [], + "description": "A vector holding the requested layout in minor-to-major sequence. If a layout\nattribute is passed but its values are all -1 the layout will be computed by\nthe infeed operation.", + "name": "layout", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "A tensor that will be linearized.", + "name": "input", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ], + "summary": "An op which linearizes one Tensor value to an opaque variant tensor." 
+ } + }, + { + "name": "PrelinearizeTuple", + "schema": { + "attributes": [ + { + "description": "The element types of each element in `inputs`.", + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "description": "The shapes of each tensor in `inputs`.", + "name": "shapes", + "type": "shape[]" + }, + { + "default": [], + "description": "A vector holding the requested layout in minor-to-major sequence for all the\ntuple shapes in the order the shapes appear in the \"shapes\" input. The layout\nelements for a sub-shape can be set to -1 in which case the corresponding layout\nwill be computed by the infeed operation.", + "name": "layouts", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "A list of tensors that will be provided using the infeed mechanism.", + "name": "inputs", + "typeListAttr": "dtypes" + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ], + "summary": "An op which linearizes multiple Tensor values to an opaque variant tensor." + } + }, + { + "name": "PreventGradient", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "Will be printed in the error when anyone tries to differentiate\nthis operation.", + "name": "message", + "type": "string" + } + ], + "description": "When executed in a graph, this op outputs its input tensor as-is.\n\nWhen building ops to compute gradients, the TensorFlow gradient system\nwill return an error when trying to lookup the gradient of this op,\nbecause no gradient must ever be registered for this function. This\nop exists to prevent subtle bugs from silently returning unimplemented\ngradients in some corner cases.", + "inputs": [ + { + "description": "any tensor.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "the same input tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "An identity op that triggers an error if a gradient is requested." 
+ } + }, + { + "name": "Print", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "minimum": 0, + "name": "U", + "type": "type[]" + }, + { + "default": "", + "description": "A string, prefix of the error message.", + "name": "message", + "type": "string" + }, + { + "default": -1, + "description": "Only log `first_n` number of times. -1 disables logging.", + "name": "first_n", + "type": "int64" + }, + { + "default": 3, + "description": "Only print this many entries of each tensor.", + "name": "summarize", + "type": "int64" + } + ], + "description": "Passes `input` through to `output` and prints `data` when evaluating.", + "inputs": [ + { + "description": "The tensor passed to `output`", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A list of tensors to print out when op is evaluated.", + "name": "data", + "typeListAttr": "U" + } + ], + "outputs": [ + { + "description": "= The unmodified `input` tensor", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Prints a list of tensors." + } + }, + { + "name": "PrintV2", + "schema": { + "attributes": [ + { + "default": "stderr", + "description": "A string specifying the output stream or logging level to print to.", + "name": "output_stream", + "type": "string" + }, + { + "default": "\n", + "name": "end", + "type": "string" + } + ], + "description": "Prints a string scalar to the desired output_stream.", + "inputs": [ + { + "description": "The string scalar to print.", + "name": "input", + "type": 7 + } + ], + "summary": "Prints a string scalar." + } + }, + { + "name": "PriorityQueue", + "schema": { + "attributes": [ + { + "default": [], + "description": "The type of each component in a value.", + "minimum": 0, + "name": "component_types", + "type": "type[]" + }, + { + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. 
If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "description": "Note that the PriorityQueue requires the first component of any element\nto be a scalar int64, in addition to the other elements declared by\ncomponent_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue\nand DequeueMany) on a PriorityQueue will all require (resp. output) one extra\nentry in their input (resp. output) lists.", + "outputs": [ + { + "description": "The handle to the queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "A queue that produces elements sorted by the first component value." + } + }, + { + "name": "PriorityQueueV2", + "schema": { + "attributes": [ + { + "default": [], + "description": "The type of each component in a value.", + "minimum": 0, + "name": "component_types", + "type": "type[]" + }, + { + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. 
If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "description": "Note that the PriorityQueue requires the first component of any element\nto be a scalar int64, in addition to the other elements declared by\ncomponent_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue\nand DequeueMany) on a PriorityQueue will all require (resp. output) one extra\nentry in their input (resp. output) lists.", + "outputs": [ + { + "description": "The handle to the queue.", + "name": "handle", + "type": 20 + } + ], + "summary": "A queue that produces elements sorted by the first component value." + } + }, + { + "name": "PrivateThreadPoolDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "Identifies the number of threads to use for the private threadpool.", + "name": "num_threads", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`." 
+ } + }, + { + "name": "Prod", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the product of elements across dimensions of a tensor." + } + }, + { + "name": "PyFunc", + "schema": { + "attributes": [ + { + "description": "A token representing a registered python function in this address space.", + "name": "token", + "type": "string" + }, + { + "description": "Data types of the inputs to the op.", + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "description": "Data types of the outputs from the op.\nThe length of the list specifies the number of outputs.", + "minimum": 0, + "name": "Tout", + "type": "type[]" + } + ], + "description": "This operation is considered stateful. 
For a stateless version, see\nPyFuncStateless.", + "inputs": [ + { + "description": "List of Tensors that will provide input to the Op.", + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "The outputs from the Op.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "Invokes a python function to compute func(input)->output." + } + }, + { + "name": "PyFuncStateless", + "schema": { + "attributes": [ + { + "name": "token", + "type": "string" + }, + { + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Tout", + "type": "type[]" + } + ], + "inputs": [ + { + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "A stateless version of PyFunc." + } + }, + { + "name": "Qr", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, compute full-sized `q` and `r`. If false\n(the default), compute only the leading `P` columns of `q`.", + "name": "full_matrices", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Computes the QR decomposition of each inner matrix in `tensor` such that\n`tensor[..., :, :] = q[..., :, :] * r[..., :,:])`\n\n```python\n# a is a tensor.\n# q is a tensor of orthonormal matrices.\n# r is a tensor of upper triangular matrices.\nq, r = qr(a)\nq_full, r_full = qr(a, full_matrices=True)\n```", + "inputs": [ + { + "description": "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Orthonormal basis for range of `a`. 
If `full_matrices` is `False` then\nshape is `[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`.", + "name": "q", + "typeAttr": "T" + }, + { + "description": "Triangular factor. If `full_matrices` is `False` then shape is\n`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.", + "name": "r", + "typeAttr": "T" + } + ], + "summary": "Computes the QR decompositions of one or more matrices." + } + }, + { + "name": "QuantizeAndDequantize", + "schema": { + "attributes": [ + { + "default": true, + "name": "signed_input", + "type": "boolean" + }, + { + "default": 8, + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "name": "range_given", + "type": "boolean" + }, + { + "default": 0.0, + "name": "input_min", + "type": "float32" + }, + { + "default": 0.0, + "name": "input_max", + "type": "float32" + }, + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Use QuantizeAndDequantizeV2 instead." + } + }, + { + "name": "QuantizeAndDequantizeV2", + "schema": { + "attributes": [ + { + "default": true, + "description": "Whether the quantization is signed or unsigned. 
(actually this parameter should\nhave been called `signed_output`)", + "name": "signed_input", + "type": "boolean" + }, + { + "default": 8, + "description": "The bitwidth of the quantization.", + "name": "num_bits", + "type": "int64" + }, + { + "default": false, + "description": "Whether the range is given or should be determined from the `input` tensor.", + "name": "range_given", + "type": "boolean" + }, + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "HALF_TO_EVEN", + "description": "The 'round_mode' attribute controls which rounding tie-breaking algorithm is\nused when rounding float values to their quantized equivalents. The following\nrounding modes are currently supported:\n\n* HALF_TO_EVEN: this is the default round_mode.\n* HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5\n rounds up to -7.\n Must be one of the following: `HALF_TO_EVEN`, `HALF_UP`.", + "name": "round_mode", + "type": "string" + }, + { + "default": false, + "description": "If True, then the absolute value of the quantized minimum value is the same as\nthe quantized maximum value, instead of 1 greater.\ni.e. for 8 bit quantization, the minimum value is -127 instead of -128.", + "name": "narrow_range", + "type": "boolean" + }, + { + "default": -1, + "description": "If specified, this axis is treated as a channel or slice axis, and a separate\nquantization range is used for each channel or slice along this axis.", + "name": "axis", + "type": "int64" + } + ], + "description": "This op simulates the precision loss from the quantized forward pass by:\n\n1. Quantizing the tensor to fixed point numbers, which should match the target\n quantization method when it is used in inference.\n2. Dequantizing it back to floating point numbers for the following ops, most\n likely matmul.\n\nThere are different ways to quantize. 
This version uses only scaling, so 0.0\nmaps to 0.\n\nFrom the specified 'num_bits' in the quantized output type, it determines\nminimum and maximum representable quantized values.\n\ne.g.\n\n* [-128, 127] for signed, num_bits = 8, or\n* [0, 255] for unsigned, num_bits = 8.\n\nIf range_given == False, the initial input_min, input_max will be determined\nautomatically as the minimum and maximum values in the input tensor, otherwise\nthe specified values of input_min, input_max are used.\n\nNote: If the input_min, input_max are specified, they do not need to equal the\nactual minimum and maximum values in the tensor. e.g. in some cases it may be\nbeneficial to specify these values such that the low probability extremes of the\ninput distribution are clipped.\n\nThis op determines the maximum scale_factor that would map the initial\n[input_min, input_max] range to a range that lies within the representable\nquantized range.\n\nIt determines the scale from one of input_min and input_max, then updates the\nother one to maximize the representable range.\n\ne.g.\n\n* if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,\n 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it\n would update input_max to be 127 / 12.8 = 9.921875\n* if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,\n 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it\n would update input_min to be 128.0 / 12.7 = -10.07874\n* if the output is unsigned, input_min is forced to be 0, and only the\n specified input_max is used.\n\nAfter determining the scale_factor and updating the input range, it applies the\nfollowing to each value in the 'input' tensor.\n\noutput = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.\n\nThe above round function rounds the value based on the given round_mode.\n", + "inputs": [ + { + "description": "Tensor to quantize and then dequantize.", + "name": "input", + "typeAttr": "T" + 
}, + { + "description": "If `range_given == True`, this specifies the minimum input value that needs to\nbe represented, otherwise it is determined from the min value of the `input`\ntensor.", + "name": "input_min", + "typeAttr": "T" + }, + { + "description": "If `range_given == True`, this specifies the maximum input value that needs to\nbe represented, otherwise it is determined from the max value of the `input`\ntensor.", + "name": "input_max", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Quantizes then dequantizes a tensor." + } + }, + { + "name": "QuantizeAndDequantizeV3", + "schema": { + "attributes": [ + { + "default": true, + "name": "signed_input", + "type": "boolean" + }, + { + "default": true, + "name": "range_given", + "type": "boolean" + }, + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "name": "narrow_range", + "type": "boolean" + }, + { + "default": -1, + "name": "axis", + "type": "int64" + } + ], + "description": "This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a\ntensor, so its value can change during training.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "input_min", + "typeAttr": "T" + }, + { + "name": "input_max", + "typeAttr": "T" + }, + { + "name": "num_bits", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Quantizes then dequantizes a tensor." + } + }, + { + "name": "QuantizeDownAndShrinkRange", + "schema": { + "attributes": [ + { + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "The type of the output. Should be a lower bit depth than Tinput. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + } + ], + "description": "actual distribution of the values to maximize the usage of the lower bit depth\nand adjusting the output min and max ranges accordingly.\n\n[input_min, input_max] are scalar floats that specify the range for the float\ninterpretation of the 'input' data. For example, if input_min is -1.0f and\ninput_max is 1.0f, and we are dealing with quint16 quantized data, then a 0\nvalue in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.\n\nThis operator tries to squeeze as much precision as possible into an output with\na lower bit depth by calculating the actual min and max values found in the\ndata. For example, maybe that quint16 input has no values lower than 16,384 and\nnone higher than 49,152. That means only half the range is actually needed, all\nthe float interpretations are between -0.5f and 0.5f, so if we want to compress\nthe data into a quint8 output, we can use that range rather than the theoretical\n-1.0f to 1.0f that is suggested by the input min and max.\n\nIn practice, this is most useful for taking output from operations like\nQuantizedMatMul that can produce higher bit-depth outputs than their inputs and\nmay have large potential output ranges, but in practice have a distribution of\ninput values that only uses a small fraction of the possible range. 
By feeding\nthat output into this operator, we can reduce it from 32 bits down to 8 with\nminimal loss of accuracy.", + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The float value that the minimum quantized input value represents.", + "name": "input_min", + "type": 1 + }, + { + "description": "The float value that the maximum quantized input value represents.", + "name": "input_max", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The float value that the minimum quantized output value represents.", + "name": "output_min", + "type": 1 + }, + { + "description": "The float value that the maximum quantized output value represents.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Convert the quantized 'input' tensor into a lower-precision 'output', using the" + } + }, + { + "name": "QuantizeV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T", + "type": "type" + }, + { + "default": "MIN_COMBINED", + "description": "Must be one of the following: `MIN_COMBINED`, `MIN_FIRST`, `SCALED`.", + "name": "mode", + "type": "string" + }, + { + "default": "HALF_AWAY_FROM_ZERO", + "description": "Must be one of the following: `HALF_AWAY_FROM_ZERO`, `HALF_TO_EVEN`.", + "name": "round_mode", + "type": "string" + }, + { + "default": false, + "name": "narrow_range", + "type": "boolean" + }, + { + "default": -1, + "name": "axis", + "type": "int64" + }, + { + "default": 0.009999999776482582, + "name": "ensure_minimum_range", + "type": "float32" + } + ], + "description": "[min_range, max_range] are scalar floats that specify the range for\nthe 'input' data. The 'mode' attribute controls exactly which calculations are\nused to convert the float values to their quantized equivalents. 
The\n'round_mode' attribute controls which rounding tie-breaking algorithm is used\nwhen rounding float values to their quantized equivalents.\n\nIn 'MIN_COMBINED' mode, each value of the tensor will undergo the following:\n\n```\nout[i] = (in[i] - min_range) * range(T) / (max_range - min_range)\nif T == qint8: out[i] -= (range(T) + 1) / 2.0\n```\n\nhere `range(T) = numeric_limits::max() - numeric_limits::min()`\n\n*MIN_COMBINED Mode Example*\n\nAssume the input is type float and has a possible range of [0.0, 6.0] and the\noutput type is quint8 ([0, 255]). The min_range and max_range values should be\nspecified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each\nvalue of the input by 255/6 and cast to quint8.\n\nIf the output type was qint8 ([-128, 127]), the operation will additionally\nsubtract each value by 128 prior to casting, so that the range of values aligns\nwith the range of qint8.\n\nIf the mode is 'MIN_FIRST', then this approach is used:\n\n```\nnum_discrete_values = 1 << (# of bits in T)\nrange_adjust = num_discrete_values / (num_discrete_values - 1)\nrange = (range_max - range_min) * range_adjust\nrange_scale = num_discrete_values / range\nquantized = round(input * range_scale) - round(range_min * range_scale) +\n numeric_limits::min()\nquantized = max(quantized, numeric_limits::min())\nquantized = min(quantized, numeric_limits::max())\n```\n\nThe biggest difference between this and MIN_COMBINED is that the minimum range\nis rounded first, before it's subtracted from the rounded value. 
With\nMIN_COMBINED, a small bias is introduced where repeated iterations of quantizing\nand dequantizing will introduce a larger and larger error.\n\n*SCALED mode Example*\n\n`SCALED` mode matches the quantization approach used in\n`QuantizeAndDequantize{V2|V3}`.\n\nIf the mode is `SCALED`, the quantization is performed by multiplying each\ninput value by a scaling_factor.\nThe scaling_factor is determined from `min_range` and `max_range` to be as large\nas possible such that the range from `min_range` to `max_range` is representable\nwithin values of type T.\n\n```c++\n\n const int min_T = std::numeric_limits::min();\n const int max_T = std::numeric_limits::max();\n const float max_float = std::numeric_limits::max();\n\n const float scale_factor_from_min_side =\n (min_T * min_range > 0) ? min_T / min_range : max_float;\n const float scale_factor_from_max_side =\n (max_T * max_range > 0) ? max_T / max_range : max_float;\n\n const float scale_factor = std::min(scale_factor_from_min_side,\n scale_factor_from_max_side);\n```\n\nWe next use the scale_factor to adjust min_range and max_range as follows:\n\n```c++\n min_range = min_T / scale_factor;\n max_range = max_T / scale_factor;\n```\n\n\ne.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would\ncompare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8\nIn this case, min_range would remain -10, but max_range would be adjusted to\n127 / 12.8 = 9.921875\n\nSo we will quantize input values in the range (-10, 9.921875) to (-128, 127).\n\nThe input tensor can now be quantized by clipping values to the range\n`min_range` to `max_range`, then multiplying by scale_factor as follows:\n\n```c++\nresult = round(min(max_range, max(min_range, input)) * scale_factor)\n```\n\nThe adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of\nthis operation. 
These outputs should be used as the range for any further\ncalculations.\n\n\n*narrow_range (bool) attribute*\n\nIf true, we do not use the minimum quantized value.\ni.e. for int8 the quantized output, it would be restricted to the range\n-127..127 instead of the full -128..127 range.\nThis is provided for compatibility with certain inference backends.\n(Only applies to SCALED mode)\n\n\n*axis (int) attribute*\n\nAn optional `axis` attribute can specify a dimension index of the input tensor,\nsuch that quantization ranges will be calculated and applied separately for each\nslice of the tensor along that dimension. This is useful for per-channel\nquantization.\n\nIf axis is specified, min_range and max_range\n\nif `axis`=None, per-tensor quantization is performed as normal.\n\n\n*ensure_minimum_range (float) attribute*\n\nEnsures the minimum quantization range is at least this value.\nThe legacy default value for this is 0.01, but it is strongly suggested to\nset it to 0 for new uses.\n", + "inputs": [ + { + "name": "input", + "type": 1 + }, + { + "description": "The minimum value of the quantization range. This value may be adjusted by the\nop depending on other parameters. The adjusted value is written to `output_min`.\nIf the `axis` attribute is specified, this must be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "name": "min_range", + "type": 1 + }, + { + "description": "The maximum value of the quantization range. This value may be adjusted by the\nop depending on other parameters. 
The adjusted value is written to `output_max`.\nIf the `axis` attribute is specified, this must be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "name": "max_range", + "type": 1 + } + ], + "outputs": [ + { + "description": "The quantized data produced from the float input.", + "name": "output", + "typeAttr": "T" + }, + { + "description": "The final quantization range minimum, used to clip input values before scaling\nand rounding them to quantized values.\nIf the `axis` attribute is specified, this will be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "name": "output_min", + "type": 1 + }, + { + "description": "The final quantization range maximum, used to clip input values before scaling\nand rounding them to quantized values.\nIf the `axis` attribute is specified, this will be a 1-D tensor whose size\nmatches the `axis` dimension of the input and output tensors.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Quantize the 'input' tensor of type float to 'output' tensor of type 'T'." 
+ } + }, + { + "name": "QuantizedAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Toutput", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T1" + }, + { + "name": "y", + "typeAttr": "T2" + }, + { + "description": "The float value that the lowest quantized `x` value represents.", + "name": "min_x", + "type": 1 + }, + { + "description": "The float value that the highest quantized `x` value represents.", + "name": "max_x", + "type": 1 + }, + { + "description": "The float value that the lowest quantized `y` value represents.", + "name": "min_y", + "type": 1 + }, + { + "description": "The float value that the highest quantized `y` value represents.", + "name": "max_y", + "type": 1 + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "Toutput" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_z", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "name": "max_z", + "type": 1 + } + ], + "summary": "Returns x + y element-wise, working on quantized buffers." 
+ } + }, + { + "name": "QuantizedAvgPool", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T", + "type": "type" + }, + { + "description": "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input.", + "name": "ksize", + "type": "int64[]" + }, + { + "description": "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The float value that the lowest quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the highest quantized input value represents.", + "name": "max_input", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_output", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Produces the average pool of the input tensor for quantized types." 
+ } + }, + { + "name": "QuantizedBatchNormWithGlobalNormalization", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "description": "A small float number to avoid dividing by 0.", + "name": "variance_epsilon", + "type": "float32" + }, + { + "description": "A bool indicating whether the resulted tensor\nneeds to be multiplied with gamma.", + "name": "scale_after_normalization", + "type": "boolean" + } + ], + "description": "This op is deprecated and will be removed in the future. Prefer\n`tf.nn.batch_normalization`.", + "inputs": [ + { + "description": "A 4D input Tensor.", + "name": "t", + "typeAttr": "Tinput" + }, + { + "description": "The value represented by the lowest quantized input.", + "name": "t_min", + "type": 1 + }, + { + "description": "The value represented by the highest quantized input.", + "name": "t_max", + "type": 1 + }, + { + "description": "A 1D mean Tensor with size matching the last dimension of t.\nThis is the first output from tf.nn.moments,\nor a saved moving average thereof.", + "name": "m", + "typeAttr": "Tinput" + }, + { + "description": "The value represented by the lowest quantized mean.", + "name": "m_min", + "type": 1 + }, + { + "description": "The value represented by the highest quantized mean.", + "name": "m_max", + "type": 1 + }, + { + "description": "A 1D variance Tensor with size matching the last dimension of t.\nThis is the second output from tf.nn.moments,\nor a saved moving average thereof.", + "name": "v", + "typeAttr": "Tinput" + }, + { + "description": "The value represented by the lowest quantized variance.", + "name": "v_min", + "type": 1 + }, + { + "description": "The value represented by the highest quantized variance.", + "name": "v_max", 
+ "type": 1 + }, + { + "description": "A 1D beta Tensor with size matching the last dimension of t.\nAn offset to be added to the normalized tensor.", + "name": "beta", + "typeAttr": "Tinput" + }, + { + "description": "The value represented by the lowest quantized offset.", + "name": "beta_min", + "type": 1 + }, + { + "description": "The value represented by the highest quantized offset.", + "name": "beta_max", + "type": 1 + }, + { + "description": "A 1D gamma Tensor with size matching the last dimension of t.\nIf \"scale_after_normalization\" is true, this tensor will be multiplied\nwith the normalized tensor.", + "name": "gamma", + "typeAttr": "Tinput" + }, + { + "description": "The value represented by the lowest quantized gamma.", + "name": "gamma_min", + "type": 1 + }, + { + "description": "The value represented by the highest quantized gamma.", + "name": "gamma_max", + "type": 1 + } + ], + "outputs": [ + { + "name": "result", + "typeAttr": "out_type" + }, + { + "name": "result_min", + "type": 1 + }, + { + "name": "result_max", + "type": 1 + } + ], + "summary": "Quantized Batch normalization." 
+ } + }, + { + "name": "QuantizedBiasAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + } + ], + "description": "Broadcasts the values of bias on dimensions 0..N-2 of 'input'.", + "inputs": [ + { + "name": "input", + "typeAttr": "T1" + }, + { + "description": "A 1D bias Tensor with size matching the last dimension of 'input'.", + "name": "bias", + "typeAttr": "T2" + }, + { + "description": "The float value that the lowest quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the highest quantized input value represents.", + "name": "max_input", + "type": 1 + }, + { + "description": "The float value that the lowest quantized bias value represents.", + "name": "min_bias", + "type": 1 + }, + { + "description": "The float value that the highest quantized bias value represents.", + "name": "max_bias", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_out", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_out", + "type": 1 + } + ], + "summary": "Adds Tensor 'bias' to Tensor 'input' for Quantized types." + } + }, + { + "name": "QuantizedConcat", + "schema": { + "attributes": [ + { + "minimum": 2, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "0-D. The dimension along which to concatenate. 
Must be in the\nrange [0, rank(values)).", + "name": "concat_dim", + "type": 3 + }, + { + "description": "The `N` Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except `concat_dim`.", + "name": "values", + "numberAttr": "N", + "typeAttr": "T" + }, + { + "description": "The minimum scalar values for each of the input tensors.", + "name": "input_mins", + "numberAttr": "N", + "type": 1 + }, + { + "description": "The maximum scalar values for each of the input tensors.", + "name": "input_maxes", + "numberAttr": "N", + "type": 1 + } + ], + "outputs": [ + { + "description": "A `Tensor` with the concatenation of values stacked along the\n`concat_dim` dimension. This tensor's shape matches that of `values` except\nin `concat_dim` where it has the sum of the sizes.", + "name": "output", + "typeAttr": "T" + }, + { + "description": "The float value that the minimum quantized output value represents.", + "name": "output_min", + "type": 1 + }, + { + "description": "The float value that the maximum quantized output value represents.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Concatenates quantized tensors along one dimension." + } + }, + { + "name": "QuantizedConv2D", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "description": "The stride of the sliding window for each dimension of the input\ntensor.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. 
Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "1-D tensor of length 4. The dilation factor for each dimension of\n`input`. If set to k > 1, there will be k-1 skipped cells between each\nfilter element on that dimension. The dimension order is determined by the\nvalue of `data_format`, see above for details. Dilations in the batch and\ndepth dimensions must be 1.", + "name": "dilations", + "type": "int64[]" + } + ], + "description": "The inputs are quantized tensors where the lowest value represents the real\nnumber of the associated minimum, and the highest represents the maximum.\nThis means that you can only interpret the quantized output in the same way, by\ntaking the returned minimum and maximum values into account.", + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "filter's input_depth dimension must match input's depth dimensions.", + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "description": "The float value that the lowest quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the highest quantized input value represents.", + "name": "max_input", + "type": 1 + }, + { + "description": "The float value that the lowest quantized filter value represents.", + "name": "min_filter", + "type": 1 + }, + { + "description": "The float value that the highest quantized filter value represents.", + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_output", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Computes a 2D convolution given quantized 4D input and filter 
tensors." + } + }, + { + "name": "QuantizedConv2DAndRelu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DAndReluAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + 
"type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 11 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + 
"name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DPerChannel", + "schema": { + "attributes": [ + { + "description": "The quantized type of input tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "The quantized type of filter tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "The quantized type of output tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "description": "list of stride values.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "list of dilation values.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The original filter tensor.", + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "description": "The minimum value of the input tensor", + "name": "min_input", + "type": 1 + }, + { + "description": "The maximum value of the input tensor.", + "name": "max_input", + "type": 1 + }, + { + "description": "The minimum value of the filter tensor.", + "name": "min_filter", + "type": 1 + }, + { 
+ "description": "The maximum value of the filter tensor.", + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "description": "The output tensor.", + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The minimum value of the final output tensor.", + "name": "min_output", + "type": 1 + }, + { + "description": "The maximum value of the final output tensor.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Computes QuantizedConv2D per channel." + } + }, + { + "name": "QuantizedConv2DWithBias", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "type": 1 + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DWithBiasAndRelu", + "schema": { + "attributes": [ + { + 
"description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "type": 1 + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DWithBiasAndReluAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": 
"out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DWithBiasAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 11 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + 
"type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tsummand", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": 
"Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + }, + { + "name": "summand", + "typeAttr": "Tsummand" + }, + { + "name": "min_summand", + "type": 1 + }, + { + "name": "max_summand", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DWithBiasSumAndRelu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "type": 1 + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "summand", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": 
"min_output", + "type": 1 + }, + { + "name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedConv2DWithBiasSumAndReluAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tsummand", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_input", + "type": 1 + }, + { + "name": "max_input", + "type": 1 + }, + { + "name": "min_filter", + "type": 1 + }, + { + "name": "max_filter", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + }, + { + "name": "summand", + "typeAttr": "Tsummand" + }, + { + "name": "min_summand", + "type": 1 + }, + { + "name": "max_summand", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "name": "min_output", + "type": 1 + }, + { + 
"name": "max_output", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedDepthwiseConv2D", + "schema": { + "attributes": [ + { + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "The type of the filter. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "The type of the output. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "description": "List of stride values.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "List of dilation values.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The original filter tensor.", + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "description": "The float value that the minimum quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the maximum quantized input value represents.", + "name": "max_input", + "type": 1 + }, + { + "description": "The float value that the minimum quantized filter value represents.", + "name": "min_filter", + "type": 1 + }, + { + "description": "The float value that the maximum quantized filter value represents.", + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "description": "The output tensor.", + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The float value that the minimum quantized output value represents.", + "name": 
"min_output", + "type": 1 + }, + { + "description": "The float value that the maximum quantized output value represents.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Computes quantized depthwise Conv2D." + } + }, + { + "name": "QuantizedDepthwiseConv2DWithBias", + "schema": { + "attributes": [ + { + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "The type of the filter. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "The type of the output. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "description": "List of stride values.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "List of dilation values.", + "name": "dilations", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The original filter tensor.", + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "description": "The original bias tensor.", + "name": "bias", + "type": 1 + }, + { + "description": "The float value that the minimum quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the maximum quantized input value represents.", + "name": "max_input", + "type": 1 + }, + { + "description": "The float value that the minimum quantized filter value represents.", + "name": "min_filter", + "type": 1 + }, + { + "description": "The float value that the maximum quantized filter value 
represents.", + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "description": "The output tensor.", + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The float value that the minimum quantized output value represents.", + "name": "min_output", + "type": 1 + }, + { + "description": "The float value that the maximum quantized output value represents.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Computes quantized depthwise Conv2D with Bias." + } + }, + { + "name": "QuantizedDepthwiseConv2DWithBiasAndRelu", + "schema": { + "attributes": [ + { + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "The type of the filter. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "The type of the output. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "description": "List of stride values.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "List of dilation values.", + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The original filter tensor.", + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "description": "The original bias tensor.", + "name": "bias", + "type": 1 + }, + { + "description": "The float value that the minimum quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the maximum quantized input value represents.", + "name": "max_input", + "type": 1 + }, + { + "description": "The float value that the minimum quantized filter value represents.", + "name": "min_filter", + "type": 1 + }, + { + "description": "The float value that the maximum quantized filter value represents.", + "name": "max_filter", + "type": 1 + } + ], + "outputs": [ + { + "description": "The output tensor.", + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The float value that the minimum quantized output value represents.", + "name": "min_output", + "type": 1 + }, + { + "description": "The float value that the maximum quantized output value represents.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Computes quantized depthwise Conv2D with Bias and Relu." + } + }, + { + "name": "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", + "schema": { + "attributes": [ + { + "description": "The type of the input. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "The type of the filter. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tfilter", + "type": "type" + }, + { + "description": "The type of the bias. Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "The type of the output. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + }, + { + "description": "List of stride values.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + }, + { + "default": [ + 1, + 1, + 1, + 1 + ], + "description": "List of dilation values.", + "name": "dilations", + "type": "int64[]" + }, + { + "default": [], + "name": "padding_list", + "type": "int64[]" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The original filter tensor.", + "name": "filter", + "typeAttr": "Tfilter" + }, + { + "description": "The original bias tensor.", + "name": "bias", + "typeAttr": "Tbias" + }, + { + "description": "The float value that the minimum quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the maximum quantized input value represents.", + "name": "max_input", + "type": 1 + }, + { + "description": "The float value that the minimum quantized filter value represents.", + "name": "min_filter", + "type": 1 + }, + { + "description": "The float value that the maximum quantized filter value represents.", + "name": "max_filter", + "type": 1 + }, + { + "description": "The minimum float value of the output tensor.", + "name": 
"min_freezed_output", + "type": 1 + }, + { + "description": "The maximum float value of the output tensor.", + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "description": "The output tensor.", + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The float value that the minimum quantized output value represents.", + "name": "min_output", + "type": 1 + }, + { + "description": "The float value that the maximum quantized output value represents.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Computes quantized depthwise Conv2D with Bias, Relu and Requantize." + } + }, + { + "name": "QuantizedInstanceNorm", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, `given_y_min` and `given_y_min`\nand `given_y_max` are used as the output range. Otherwise,\nthe implementation computes the output range.", + "name": "output_range_given", + "type": "boolean" + }, + { + "default": 0.0, + "description": "Output in `y_min` if `output_range_given` is True.", + "name": "given_y_min", + "type": "float32" + }, + { + "default": 0.0, + "description": "Output in `y_max` if `output_range_given` is True.", + "name": "given_y_max", + "type": "float32" + }, + { + "default": 9.999999747378752e-06, + "description": "A small float number to avoid dividing by 0.", + "name": "variance_epsilon", + "type": "float32" + }, + { + "default": 0.0010000000474974513, + "description": "Minimum value of `y_max - y_min`", + "name": "min_separation", + "type": "float32" + } + ], + "inputs": [ + { + "description": "A 4D input Tensor.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "The value represented by the lowest quantized input.", + "name": "x_min", + "type": 1 + }, + { + "description": "The value represented by the highest quantized input.", + "name": "x_max", + 
"type": 1 + } + ], + "outputs": [ + { + "description": "A 4D Tensor.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "The value represented by the lowest quantized output.", + "name": "y_min", + "type": 1 + }, + { + "description": "The value represented by the highest quantized output.", + "name": "y_max", + "type": 1 + } + ], + "summary": "Quantized Instance normalization." + } + }, + { + "name": "QuantizedMatMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Toutput", + "type": "type" + }, + { + "default": false, + "description": "If true, `a` is transposed before multiplication.", + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "description": "If true, `b` is transposed before multiplication.", + "name": "transpose_b", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "The type of output produced by activation function\nfollowing this operation. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tactivation", + "type": "type" + } + ], + "description": "The inputs must be two-dimensional matrices and the inner dimension of\n`a` (after being transposed if `transpose_a` is non-zero) must match the\nouter dimension of `b` (after being transposed if `transposed_b` is\nnon-zero).", + "inputs": [ + { + "description": "Must be a two-dimensional tensor.", + "name": "a", + "typeAttr": "T1" + }, + { + "description": "Must be a two-dimensional tensor.", + "name": "b", + "typeAttr": "T2" + }, + { + "description": "The float value that the lowest quantized `a` value represents.", + "name": "min_a", + "type": 1 + }, + { + "description": "The float value that the highest quantized `a` value represents.", + "name": "max_a", + "type": 1 + }, + { + "description": "The float value that the lowest quantized `b` value represents.", + "name": "min_b", + "type": 1 + }, + { + "description": "The float value that the highest quantized `b` value represents.", + "name": "max_b", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_out", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_out", + "type": 1 + } + ], + "summary": "Perform a quantized matrix multiplication of `a` by the matrix `b`." 
+ } + }, + { + "name": "QuantizedMatMulWithBias", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Toutput", + "type": "type" + }, + { + "default": false, + "description": "If true, `a` is transposed before multiplication.", + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "description": "If true, `b` is transposed before multiplication.", + "name": "transpose_b", + "type": "boolean" + }, + { + "default": "MIN_FIRST", + "description": "Input data quantization mode. Either MIN_FIRST(default) or SCALED. Must be one of the following: `MIN_FIRST`, `SCALED`.", + "name": "input_quant_mode", + "type": "string" + } + ], + "description": "The inputs must be two-dimensional matrices and 1D bias vector. And the inner\ndimension of `a` (after being transposed if `transpose_a` is non-zero) must\nmatch the outer dimension of `b` (after being transposed if `transposed_b` is\nnon-zero). Then do broadcast add operation with bias values on the matrix\nmultiplication result. The bias size must match inner dimension of `b`.", + "inputs": [ + { + "description": "A matrix to be multiplied. 
Must be a two-dimensional tensor of type `quint8`.", + "name": "a", + "typeAttr": "T1" + }, + { + "description": "A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.", + "name": "b", + "typeAttr": "T2" + }, + { + "description": "A 1D bias tensor with size matching inner dimension of `b` (after being\ntransposed if `transposed_b` is non-zero).", + "name": "bias", + "typeAttr": "Tbias" + }, + { + "description": "The float value that the lowest quantized `a` value represents.", + "name": "min_a", + "type": 1 + }, + { + "description": "The float value that the highest quantized `a` value represents.", + "name": "max_a", + "type": 1 + }, + { + "description": "The float value that the lowest quantized `b` value represents.", + "name": "min_b", + "type": 1 + }, + { + "description": "The float value that the highest quantized `b` value represents.", + "name": "max_b", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_out", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_out", + "type": 1 + } + ], + "summary": "Performs a quantized matrix multiplication of `a` by the matrix `b` with bias\nadd." 
+ } + }, + { + "name": "QuantizedMatMulWithBiasAndDequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`.", + "name": "Toutput", + "type": "type" + }, + { + "default": false, + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "name": "transpose_b", + "type": "boolean" + }, + { + "default": "MIN_FIRST", + "description": "Must be one of the following: `MIN_FIRST`, `SCALED`.", + "name": "input_quant_mode", + "type": "string" + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T1" + }, + { + "name": "b", + "typeAttr": "T2" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_a", + "type": 1 + }, + { + "name": "max_a", + "type": 1 + }, + { + "name": "min_b", + "type": 1 + }, + { + "name": "max_b", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + } + ] + } + }, + { + "name": "QuantizedMatMulWithBiasAndRelu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Toutput", + "type": "type" + }, + { + "default": false, + 
"description": "If true, `a` is transposed before multiplication.", + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "description": "If true, `b` is transposed before multiplication.", + "name": "transpose_b", + "type": "boolean" + }, + { + "default": "MIN_FIRST", + "description": "Input data quantization mode. Either MIN_FIRST(default) or SCALED. Must be one of the following: `MIN_FIRST`, `SCALED`.", + "name": "input_quant_mode", + "type": "string" + } + ], + "description": "The inputs must be two-dimensional matrices and 1D bias vector. And the inner\ndimension of `a` (after being transposed if `transpose_a` is non-zero) must\nmatch the outer dimension of `b` (after being transposed if `transposed_b` is\nnon-zero). Then do broadcast add operation with bias values on the matrix\nmultiplication result. The bias size must match inner dimension of `b`. Then do\nrelu activation to get non-negative result.", + "inputs": [ + { + "description": "A matrix to be multiplied. 
Must be a two-dimensional tensor of type `quint8`.", + "name": "a", + "typeAttr": "T1" + }, + { + "description": "A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.", + "name": "b", + "typeAttr": "T2" + }, + { + "description": "A 1D bias tensor with size matching with inner dimension of `b` (after being\ntransposed if `transposed_b` is non-zero).", + "name": "bias", + "type": 1 + }, + { + "description": "The float value that the lowest quantized `a` value represents.", + "name": "min_a", + "type": 1 + }, + { + "description": "The float value that the highest quantized `a` value represents.", + "name": "max_a", + "type": 1 + }, + { + "description": "The float value that the lowest quantized `b` value represents.", + "name": "min_b", + "type": 1 + }, + { + "description": "The float value that the highest quantized `b` value represents.", + "name": "max_b", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_out", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_out", + "type": 1 + } + ], + "summary": "Perform a quantized matrix multiplication of `a` by the matrix `b` with bias\nadd and relu fusion." 
+ } + }, + { + "name": "QuantizedMatMulWithBiasAndReluAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Toutput", + "type": "type" + }, + { + "default": false, + "description": "If true, `a` is transposed before multiplication.", + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "description": "If true, `b` is transposed before multiplication.", + "name": "transpose_b", + "type": "boolean" + }, + { + "default": "MIN_FIRST", + "description": "Input data quantization mode. Either MIN_FIRST(default) or SCALED. Must be one of the following: `MIN_FIRST`, `SCALED`.", + "name": "input_quant_mode", + "type": "string" + } + ], + "description": "The inputs must be two-dimensional matrices and 1D bias vector. And the inner\ndimension of `a` (after being transposed if `transpose_a` is non-zero) must\nmatch the outer dimension of `b` (after being transposed if `transposed_b` is\nnon-zero). Then do broadcast add operation with bias values on the matrix\nmultiplication result. The bias size must match inner dimension of `b`. Then do\nrelu activation to get non-negative result. Then do requantize operation to get\nfinal uint8 result.", + "inputs": [ + { + "description": "A matrix to be multiplied. 
Must be a two-dimensional tensor of type `quint8`.", + "name": "a", + "typeAttr": "T1" + }, + { + "description": "A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.", + "name": "b", + "typeAttr": "T2" + }, + { + "description": "A 1D bias tensor with size matching with inner dimension of `b` (after being\ntransposed if `transposed_b` is non-zero).", + "name": "bias", + "typeAttr": "Tbias" + }, + { + "description": "The float value that the lowest quantized `a` value represents.", + "name": "min_a", + "type": 1 + }, + { + "description": "The float value that the highest quantized `a` value represents.", + "name": "max_a", + "type": 1 + }, + { + "description": "The float value that the lowest quantized `b` value represents.", + "name": "min_b", + "type": 1 + }, + { + "description": "The float value that the highest quantized `b` value represents.", + "name": "max_b", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value after requantize.", + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_out", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_out", + "type": 1 + } + ], + "summary": "Perform a quantized matrix multiplication of `a` by the matrix `b` with bias\nadd and relu and requantize fusion." 
+ } + }, + { + "name": "QuantizedMatMulWithBiasAndRequantize", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `qint32`.", + "name": "Tbias", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Toutput", + "type": "type" + }, + { + "default": false, + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "name": "transpose_b", + "type": "boolean" + }, + { + "default": "MIN_FIRST", + "description": "Must be one of the following: `MIN_FIRST`, `SCALED`.", + "name": "input_quant_mode", + "type": "string" + } + ], + "inputs": [ + { + "name": "a", + "typeAttr": "T1" + }, + { + "name": "b", + "typeAttr": "T2" + }, + { + "name": "bias", + "typeAttr": "Tbias" + }, + { + "name": "min_a", + "type": 1 + }, + { + "name": "max_a", + "type": 1 + }, + { + "name": "min_b", + "type": 1 + }, + { + "name": "max_b", + "type": 1 + }, + { + "name": "min_freezed_output", + "type": 1 + }, + { + "name": "max_freezed_output", + "type": 1 + } + ], + "outputs": [ + { + "name": "out", + "typeAttr": "Toutput" + }, + { + "name": "min_out", + "type": 1 + }, + { + "name": "max_out", + "type": 1 + } + ] + } + }, + { + "name": "QuantizedMaxPool", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T", + "type": "type" + }, + { + "description": "The size of the window for each dimension of the input tensor.\nThe length must be 4 to match the number of dimensions of the input.", + "name": "ksize", + "type": "int64[]" + }, + { + 
"description": "The stride of the sliding window for each dimension of the input\ntensor. The length must be 4 to match the number of dimensions of the input.", + "name": "strides", + "type": "int64[]" + }, + { + "description": "The type of padding algorithm to use. Must be one of the following: `SAME`, `VALID`.", + "name": "padding", + "type": "string" + } + ], + "inputs": [ + { + "description": "The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The float value that the lowest quantized input value represents.", + "name": "min_input", + "type": 1 + }, + { + "description": "The float value that the highest quantized input value represents.", + "name": "max_input", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_output", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.", + "name": "max_output", + "type": 1 + } + ], + "summary": "Produces the max pool of the input tensor for quantized types." 
+ } + }, + { + "name": "QuantizedMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T1", + "type": "type" + }, + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T2", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 13 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Toutput", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T1" + }, + { + "name": "y", + "typeAttr": "T2" + }, + { + "description": "The float value that the lowest quantized `x` value represents.", + "name": "min_x", + "type": 1 + }, + { + "description": "The float value that the highest quantized `x` value represents.", + "name": "max_x", + "type": 1 + }, + { + "description": "The float value that the lowest quantized `y` value represents.", + "name": "min_y", + "type": 1 + }, + { + "description": "The float value that the highest quantized `y` value represents.", + "name": "max_y", + "type": 1 + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "Toutput" + }, + { + "description": "The float value that the lowest quantized output value represents.", + "name": "min_z", + "type": 1 + }, + { + "description": "The float value that the highest quantized output value represents.\n\n*NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about\nbroadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "name": "max_z", + "type": 1 + } + ], + "summary": "Returns x * y element-wise, working on quantized buffers." 
+ } + }, + { + "name": "QuantizedRelu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "Tinput" + }, + { + "description": "The float value that the lowest quantized value represents.", + "name": "min_features", + "type": 1 + }, + { + "description": "The float value that the highest quantized value represents.", + "name": "max_features", + "type": 1 + } + ], + "outputs": [ + { + "description": "Has the same output shape as \"features\".", + "name": "activations", + "typeAttr": "out_type" + }, + { + "description": "The float value that the lowest quantized value represents.", + "name": "min_activations", + "type": 1 + }, + { + "description": "The float value that the highest quantized value represents.", + "name": "max_activations", + "type": 1 + } + ], + "summary": "Computes Quantized Rectified Linear: `max(features, 0)`" + } + }, + { + "name": "QuantizedRelu6", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "Tinput" + }, + { + "description": "The float value that the lowest quantized value represents.", + "name": "min_features", + "type": 1 + }, + { + "description": "The float value that the highest quantized value represents.", + "name": "max_features", + "type": 1 + } + ], + "outputs": [ + { + 
"description": "Has the same output shape as \"features\".", + "name": "activations", + "typeAttr": "out_type" + }, + { + "description": "The float value that the lowest quantized value represents.", + "name": "min_activations", + "type": 1 + }, + { + "description": "The float value that the highest quantized value represents.", + "name": "max_activations", + "type": 1 + } + ], + "summary": "Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`" + } + }, + { + "name": "QuantizedReluX", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "Tinput" + }, + { + "name": "max_value", + "type": 1 + }, + { + "description": "The float value that the lowest quantized value represents.", + "name": "min_features", + "type": 1 + }, + { + "description": "The float value that the highest quantized value represents.", + "name": "max_features", + "type": 1 + } + ], + "outputs": [ + { + "description": "Has the same output shape as \"features\".", + "name": "activations", + "typeAttr": "out_type" + }, + { + "description": "The float value that the lowest quantized value represents.", + "name": "min_activations", + "type": 1 + }, + { + "description": "The float value that the highest quantized value represents.", + "name": "max_activations", + "type": 1 + } + ], + "summary": "Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`" + } + }, + { + "name": "QuantizedReshape", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": 
"Tshape", + "type": "type" + } + ], + "description": "```", + "inputs": [ + { + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "Defines the shape of the output tensor.", + "name": "shape", + "typeAttr": "Tshape" + }, + { + "description": "The minimum value of the input.", + "name": "input_min", + "type": 1 + }, + { + "description": "The maximum value of the input.", + "name": "input_max", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + }, + { + "description": "This value is copied from input_min.", + "name": "output_min", + "type": 1 + }, + { + "description": "This value is copied from input_max.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Reshapes a quantized tensor as per the Reshape op." + } + }, + { + "name": "QuantizedResizeBilinear", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `quint8`, `qint32`, `float32`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "name": "align_corners", + "type": "boolean" + }, + { + "default": false, + "name": "half_pixel_centers", + "type": "boolean" + } + ], + "description": "Input images and output images must be quantized types.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. 
The\nnew size for the images.", + "name": "size", + "type": 3 + }, + { + "name": "min", + "type": 1 + }, + { + "name": "max", + "type": 1 + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "name": "resized_images", + "typeAttr": "T" + }, + { + "name": "out_min", + "type": 1 + }, + { + "name": "out_max", + "type": 1 + } + ], + "summary": "Resize quantized `images` to `size` using quantized bilinear interpolation." + } + }, + { + "name": "QueueClose", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled.", + "name": "cancel_pending_enqueues", + "type": "boolean" + } + ], + "description": "This operation signals that no more elements will be enqueued in the\ngiven queue. Subsequent Enqueue(Many) operations will fail.\nSubsequent Dequeue(Many) operations will continue to succeed if\nsufficient elements remain in the queue. Subsequent Dequeue(Many)\noperations that would block will fail immediately.", + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "Closes the given queue." + } + }, + { + "name": "QueueCloseV2", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, all pending enqueue requests that are\nblocked on the given queue will be canceled.", + "name": "cancel_pending_enqueues", + "type": "boolean" + } + ], + "description": "This operation signals that no more elements will be enqueued in the\ngiven queue. Subsequent Enqueue(Many) operations will fail.\nSubsequent Dequeue(Many) operations will continue to succeed if\nsufficient elements remain in the queue. Subsequent Dequeue(Many)\noperations that would block will fail immediately.", + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + } + ], + "summary": "Closes the given queue." 
+ } + }, + { + "name": "QueueDequeue", + "schema": { + "attributes": [ + { + "description": "The type of each component in a tuple.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "This operation has k outputs, where k is the number of components\nin the tuples stored in the given queue, and output i is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until an element\nhas been dequeued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "One or more tensors that were dequeued as a tuple.", + "name": "components", + "typeListAttr": "component_types" + } + ], + "summary": "Dequeues a tuple of one or more tensors from the given queue." + } + }, + { + "name": "QueueDequeueMany", + "schema": { + "attributes": [ + { + "description": "The type of each component in a tuple.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "If the queue is closed and there are fewer than `n` elements, then an\nOutOfRange error is returned.\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. 
All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until `n` elements\nhave been dequeued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "The number of tuples to dequeue.", + "name": "n", + "type": 3 + } + ], + "outputs": [ + { + "description": "One or more tensors that were dequeued as a tuple.", + "name": "components", + "typeListAttr": "component_types" + } + ], + "summary": "Dequeues `n` tuples of one or more tensors from the given queue." + } + }, + { + "name": "QueueDequeueManyV2", + "schema": { + "attributes": [ + { + "description": "The type of each component in a tuple.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "If the queue is closed and there are fewer than `n` elements, then an\nOutOfRange error is returned.\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.\n\nN.B. 
If the queue is empty, this operation will block until `n` elements\nhave been dequeued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + }, + { + "description": "The number of tuples to dequeue.", + "name": "n", + "type": 3 + } + ], + "outputs": [ + { + "description": "One or more tensors that were dequeued as a tuple.", + "name": "components", + "typeListAttr": "component_types" + } + ], + "summary": "Dequeues `n` tuples of one or more tensors from the given queue." + } + }, + { + "name": "QueueDequeueUpTo", + "schema": { + "attributes": [ + { + "description": "The type of each component in a tuple.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "This operation is not supported by all queues. If a queue does not support\nDequeueUpTo, then an Unimplemented error is returned.\n\nIf the queue is closed and there are more than 0 but less than `n`\nelements remaining, then instead of returning an OutOfRange error like\nQueueDequeueMany, less than `n` elements are returned immediately. If\nthe queue is closed and there are 0 elements left in the queue, then\nan OutOfRange error is returned just like in QueueDequeueMany.\nOtherwise the behavior is identical to QueueDequeueMany:\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. 
All of the components\nin the dequeued tuple will have size `n` in the 0th dimension.\n\nThis operation has k outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.", + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "The number of tuples to dequeue.", + "name": "n", + "type": 3 + } + ], + "outputs": [ + { + "description": "One or more tensors that were dequeued as a tuple.", + "name": "components", + "typeListAttr": "component_types" + } + ], + "summary": "Dequeues `n` tuples of one or more tensors from the given queue." + } + }, + { + "name": "QueueDequeueUpToV2", + "schema": { + "attributes": [ + { + "description": "The type of each component in a tuple.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue has fewer than n elements, this operation\nwill block for up to timeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "This operation is not supported by all queues. If a queue does not support\nDequeueUpTo, then an Unimplemented error is returned.\n\nIf the queue is closed and there are more than 0 but less than `n`\nelements remaining, then instead of returning an OutOfRange error like\nQueueDequeueMany, less than `n` elements are returned immediately. If\nthe queue is closed and there are 0 elements left in the queue, then\nan OutOfRange error is returned just like in QueueDequeueMany.\nOtherwise the behavior is identical to QueueDequeueMany:\n\nThis operation concatenates queue-element component tensors along the\n0th dimension to make a single component tensor. 
All of the components\nin the dequeued tuple will have size n in the 0th dimension.\n\nThis operation has `k` outputs, where `k` is the number of components in\nthe tuples stored in the given queue, and output `i` is the ith\ncomponent of the dequeued tuple.", + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + }, + { + "description": "The number of tuples to dequeue.", + "name": "n", + "type": 3 + } + ], + "outputs": [ + { + "description": "One or more tensors that were dequeued as a tuple.", + "name": "components", + "typeListAttr": "component_types" + } + ], + "summary": "Dequeues `n` tuples of one or more tensors from the given queue." + } + }, + { + "name": "QueueDequeueV2", + "schema": { + "attributes": [ + { + "description": "The type of each component in a tuple.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue is empty, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "This operation has k outputs, where k is the number of components\nin the tuples stored in the given queue, and output i is the ith\ncomponent of the dequeued tuple.\n\nN.B. If the queue is empty, this operation will block until an element\nhas been dequeued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "One or more tensors that were dequeued as a tuple.", + "name": "components", + "typeListAttr": "component_types" + } + ], + "summary": "Dequeues a tuple of one or more tensors from the given queue." 
+ } + }, + { + "name": "QueueEnqueue", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Tcomponents", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "The components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelement has been enqueued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "One or more tensors from which the enqueued tensors should be taken.", + "name": "components", + "typeListAttr": "Tcomponents" + } + ], + "summary": "Enqueues a tuple of one or more tensors in the given queue." + } + }, + { + "name": "QueueEnqueueMany", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Tcomponents", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "This operation slices each component tensor along the 0th dimension to\nmake multiple queue elements. All of the tuple components must have the\nsame size in the 0th dimension.\n\nThe components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. 
If the queue is full, this operation will block until the given\nelements have been enqueued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "One or more tensors from which the enqueued tensors should\nbe taken.", + "name": "components", + "typeListAttr": "Tcomponents" + } + ], + "summary": "Enqueues zero or more tuples of one or more tensors in the given queue." + } + }, + { + "name": "QueueEnqueueManyV2", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Tcomponents", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue is too full, this operation will block for up\nto timeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "This operation slices each component tensor along the 0th dimension to\nmake multiple queue elements. All of the tuple components must have the\nsame size in the 0th dimension.\n\nThe components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelements have been enqueued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + }, + { + "description": "One or more tensors from which the enqueued tensors should\nbe taken.", + "name": "components", + "typeListAttr": "Tcomponents" + } + ], + "summary": "Enqueues zero or more tuples of one or more tensors in the given queue." 
+ } + }, + { + "name": "QueueEnqueueV2", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Tcomponents", + "type": "type[]" + }, + { + "default": -1, + "description": "If the queue is full, this operation will block for up to\ntimeout_ms milliseconds.\nNote: This option is not supported yet.", + "name": "timeout_ms", + "type": "int64" + } + ], + "description": "The components input has k elements, which correspond to the components of\ntuples stored in the given queue.\n\nN.B. If the queue is full, this operation will block until the given\nelement has been enqueued (or 'timeout_ms' elapses, if specified).", + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + }, + { + "description": "One or more tensors from which the enqueued tensors should be taken.", + "name": "components", + "typeListAttr": "Tcomponents" + } + ], + "summary": "Enqueues a tuple of one or more tensors in the given queue." + } + }, + { + "name": "QueueIsClosed", + "schema": { + "description": "This operation returns true if the queue is closed and false if the queue\nis open.", + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "name": "is_closed", + "type": 10 + } + ], + "summary": "Returns true if queue is closed." + } + }, + { + "name": "QueueIsClosedV2", + "schema": { + "description": "This operation returns true if the queue is closed and false if the queue\nis open.", + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + } + ], + "outputs": [ + { + "name": "is_closed", + "type": 10 + } + ], + "summary": "Returns true if queue is closed." 
+ } + }, + { + "name": "QueueSize", + "schema": { + "inputs": [ + { + "description": "The handle to a queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "The number of elements in the given queue.", + "name": "size", + "type": 3 + } + ], + "summary": "Computes the number of elements in the given queue." + } + }, + { + "name": "QueueSizeV2", + "schema": { + "inputs": [ + { + "description": "The handle to a queue.", + "name": "handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "The number of elements in the given queue.", + "name": "size", + "type": 3 + } + ], + "summary": "Computes the number of elements in the given queue." + } + }, + { + "name": "RFFT", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Treal", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the 1-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most dimension of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the\n`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,\nfollowed by the `fft_length / 2` positive-frequency terms.\n\nAlong the axis `RFFT` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "inputs": [ + { + "description": "A float32 tensor.", + "name": "input", + "typeAttr": "Treal" + }, + { + "description": "An int32 tensor of shape [1]. The FFT length.", + "name": "fft_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A complex64 tensor of the same rank as `input`. 
The inner-most\n dimension of `input` is replaced with the `fft_length / 2 + 1` unique\n frequency components of its 1D Fourier transform.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "Real-valued fast Fourier transform." + } + }, + { + "name": "RFFT2D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Treal", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the 2-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most 2 dimensions of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the\n`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension\nof `output`: the zero-frequency term, followed by the `fft_length / 2`\npositive-frequency terms.\n\nAlong each axis `RFFT2D` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "inputs": [ + { + "description": "A float32 tensor.", + "name": "input", + "typeAttr": "Treal" + }, + { + "description": "An int32 tensor of shape [2]. The FFT length for each dimension.", + "name": "fft_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A complex64 tensor of the same rank as `input`. The inner-most 2\n dimensions of `input` are replaced with their 2D Fourier transform. 
The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfft2\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "2D real-valued fast Fourier transform." + } + }, + { + "name": "RFFT3D", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Treal", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "Tcomplex", + "type": "type" + } + ], + "description": "Computes the 3-dimensional discrete Fourier transform of a real-valued signal\nover the inner-most 3 dimensions of `input`.\n\nSince the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the\n`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension\nof `output`: the zero-frequency term, followed by the `fft_length / 2`\npositive-frequency terms.\n\nAlong each axis `RFFT3D` is computed on, if `fft_length` is smaller than the\ncorresponding dimension of `input`, the dimension is cropped. If it is larger,\nthe dimension is padded with zeros.", + "inputs": [ + { + "description": "A float32 tensor.", + "name": "input", + "typeAttr": "Treal" + }, + { + "description": "An int32 tensor of shape [3]. The FFT length for each dimension.", + "name": "fft_length", + "type": 3 + } + ], + "outputs": [ + { + "description": "A complex64 tensor of the same rank as `input`. The inner-most 3\n dimensions of `input` are replaced with the their 3D Fourier transform. The\n inner-most dimension contains `fft_length / 2 + 1` unique frequency\n components.\n\n@compatibility(numpy)\nEquivalent to np.fft.rfftn with 3 dimensions.\n@end_compatibility", + "name": "output", + "typeAttr": "Tcomplex" + } + ], + "summary": "3D real-valued fast Fourier transform." 
+ } + }, + { + "name": "RGBToHSV", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "Outputs a tensor of the same shape as the `images` tensor, containing the HSV\nvalue of the pixels. The output is only well defined if the value in `images`\nare in `[0,1]`.\n\n`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and\n`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0\ncorresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.\n\nUsage Example:\n\n>>> blue_image = tf.stack([\n... tf.zeros([5,5]),\n... tf.zeros([5,5]),\n... tf.ones([5,5])],\n... axis=-1)\n>>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)\n>>> blue_hsv_image[0,0].numpy()\narray([0.6666667, 1. , 1. ], dtype=float32)\n", + "inputs": [ + { + "description": "1-D or higher rank. RGB data to convert. Last dimension must be size 3.", + "name": "images", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "`images` converted to HSV.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Converts one or more images from RGB to HSV." + } + }, + { + "name": "RaggedBincount", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "bool; Whether the kernel should count the appearance or number of occurrences.", + "name": "binary_output", + "type": "boolean" + } + ], + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "inputs": [ + { + "description": "1D int64 `Tensor`.", + "name": "splits", + "type": 9 + }, + { + "description": "2D int `Tensor`.", + "name": "values", + "typeAttr": "Tidx" + }, + { + "description": "non-negative int scalar `Tensor`.", + "name": "size", + "typeAttr": "Tidx" + }, + { + "description": "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `input`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "name": "weights", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].\nThe counts or summed weights for each value in the range [0, size).", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Counts the number of occurrences of each value in an integer array." + } + }, + { + "name": "RaggedCountSparseOutput", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": -1, + "description": "Minimum value to count. Can be set to -1 for no minimum.", + "minimum": -1, + "name": "minlength", + "type": "int64" + }, + { + "default": -1, + "description": "Maximum value to count. Can be set to -1 for no maximum.", + "minimum": -1, + "name": "maxlength", + "type": "int64" + }, + { + "description": "Whether to output the number of occurrences of each value or 1.", + "name": "binary_output", + "type": "boolean" + }, + { + "description": "Dtype of the output values tensor. 
Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "output_type", + "type": "type" + } + ], + "description": " Counts the number of times each value occurs in the input.", + "inputs": [ + { + "description": "Tensor containing the row splits of the ragged tensor to count.", + "name": "splits", + "type": 9 + }, + { + "description": "Tensor containing values of the sparse tensor to count.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "A Tensor of the same shape as indices containing per-index weight values.\nMay also be the empty tensor if no weights are used.", + "name": "weights", + "typeAttr": "output_type" + } + ], + "outputs": [ + { + "description": "Indices tensor for the resulting sparse tensor object.", + "name": "output_indices", + "type": 9 + }, + { + "description": "Values tensor for the resulting sparse tensor object.", + "name": "output_values", + "typeAttr": "output_type" + }, + { + "description": "Shape tensor for the resulting sparse tensor object.\n END\n }\n attr {\n name: \"T\"\n description: <\n```\n\nThe input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.\nThe vector inputs must all have the same size. Scalar inputs are broadcast\nto match the size of the vector inputs.", + "inputs": [ + { + "description": "The starts of each range.", + "name": "starts", + "typeAttr": "T" + }, + { + "description": "The limits of each range.", + "name": "limits", + "typeAttr": "T" + }, + { + "description": "The deltas of each range.", + "name": "deltas", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The `row_splits` for the returned `RaggedTensor`.", + "name": "rt_nested_splits", + "typeAttr": "Tsplits" + }, + { + "description": "The `flat_values` for the returned `RaggedTensor`.", + "name": "rt_dense_values", + "typeAttr": "T" + } + ], + "summary": "Returns a `RaggedTensor` containing the specified sequences of numbers." 
+ } + }, + { + "name": "RaggedTensorFromVariant", + "schema": { + "attributes": [ + { + "description": "The ragged rank of each encoded `RaggedTensor` component in the input. If set to\n-1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`", + "minimum": -1, + "name": "input_ragged_rank", + "type": "int64" + }, + { + "description": "The expected ragged rank of the output `RaggedTensor`. The following must hold:\n`output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.", + "minimum": 0, + "name": "output_ragged_rank", + "type": "int64" + }, + { + "name": "Tvalues", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsplits", + "type": "type" + } + ], + "description": "Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input\ncould be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank\n`output_ragged_rank`. It could also have an arbitrary rank, in which case each\nelement is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank`\nand these are then stacked according to the input shape to output a single\n`RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in\nthe input Tensor is decoded by retrieving from the element a 1-D `variant`\nTensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and\nvalues of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is\ninferred as `output_ragged_rank` - `rank(encoded_ragged)`. 
See\n`RaggedTensorToVariant` for the corresponding encoding logic.\n", + "inputs": [ + { + "description": "A `variant` Tensor containing encoded `RaggedTensor`s.", + "name": "encoded_ragged", + "type": 21 + } + ], + "outputs": [ + { + "description": "A list of one or more Tensors representing the splits of the output\n`RaggedTensor`.", + "name": "output_nested_splits", + "numberAttr": "output_ragged_rank", + "typeAttr": "Tsplits" + }, + { + "description": "A Tensor representing the values of the output `RaggedTensor`.", + "name": "output_dense_values", + "typeAttr": "Tvalues" + } + ], + "summary": "Decodes a `variant` Tensor into a `RaggedTensor`." + } + }, + { + "name": "RaggedTensorToSparse", + "schema": { + "attributes": [ + { + "description": "The ragged rank of the input RaggedTensor. `rt_nested_splits` should contain\nthis number of ragged-splits tensors. This value should equal\n`input.ragged_rank`.", + "minimum": 1, + "name": "RAGGED_RANK", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsplits", + "type": "type" + } + ], + "description": "input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)\noutput=SparseTensor(indices=sparse_indices, values=sparse_values,\n dense_shape=sparse_dense_shape)", + "inputs": [ + { + "description": "The `row_splits` for the `RaggedTensor`.", + "name": "rt_nested_splits", + "numberAttr": "RAGGED_RANK", + "typeAttr": "Tsplits" + }, + { + "description": "The `flat_values` for the `RaggedTensor`.", + "name": "rt_dense_values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The indices for the `SparseTensor`.", + "name": "sparse_indices", + "type": 9 + }, + { + "description": "The values of the `SparseTensor`.", + "name": "sparse_values", + "typeAttr": "T" + }, + { + "description": "`sparse_dense_shape` is a tight bounding box of the input 
`RaggedTensor`.", + "name": "sparse_dense_shape", + "type": 9 + } + ], + "summary": "Converts a `RaggedTensor` into a `SparseTensor` with the same values." + } + }, + { + "name": "RaggedTensorToTensor", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int64`, `int32`.", + "name": "Tindex", + "type": "type" + }, + { + "description": "Must be one of the following: `int64`, `int32`.", + "name": "Tshape", + "type": "type" + }, + { + "minimum": 1, + "name": "num_row_partition_tensors", + "type": "int64" + }, + { + "description": "The types of the row partition tensors. At present, these can be:\n* \"ROW_SPLITS\": the row_splits tensor from the ragged tensor.\n* \"VALUE_ROWIDS\": the value_rowids tensor from the ragged tensor.\n* \"FIRST_DIM_SIZE\": if value_rowids is used for the first dimension, then it\n is preceeded by \"FIRST_DIM_SIZE\".\nThe tensors are in the order of the dimensions.", + "name": "row_partition_types", + "type": "string[]" + } + ], + "description": "The `ragged_to_dense` op creates a dense tensor from a list of row partition\ntensors, a value vector, and default values. If the shape is unspecified, the\nminimal shape required to contain all the elements in the ragged tensor (the\nnatural shape) will be used. If some dimensions are left unspecified, then the\nsize of the natural shape is used in that dimension.\n\nThe default_value will be broadcast to the output shape. After that, the values\nfrom the ragged tensor overwrite the default values. 
Note that the default_value\nmust have less dimensions than the value.\n\nThe row partition tensors are in the order of the dimensions.\nAt present, the types can be:\n* \"ROW_SPLITS\": the row_splits tensor from the ragged tensor.\n* \"VALUE_ROWIDS\": the value_rowids tensor from the ragged tensor.\n* \"FIRST_DIM_SIZE\": if value_rowids is used for the first dimension, then it\n is preceded by \"FIRST_DIM_SIZE\".", + "inputs": [ + { + "description": "The desired shape of the the output tensor. If left unspecified (empty),\nthe minimal shape required to contain all the elements in the ragged tensor\n(the natural shape) will be used. If some dimensions are left unspecified, then\nthe size of the natural shape is used in that dimension.\n\nNote that dense dimensions cannot be modified by the shape argument. Trying to\nchange the size of a dense dimension will cause the op to fail.\nExamples:\nnatural shape: [4, 5, 6]\nshape: -1\noutput shape: [4, 5, 6]\n\nnatural shape: [4, 5, 6]\nshape: [3, -1, 2]\noutput shape: [3, 5, 2]\n\nnatural shape: [4, 5, 6]\nshape: [3, 7, 2]\noutput shape: [3, 7, 2]\n", + "name": "shape", + "typeAttr": "Tshape" + }, + { + "description": "A 1D tensor representing the values of the ragged tensor.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "The default_value when the shape is larger than the ragged tensor. The\ndefault_value is broadcast until it is the shape of the output tensor, and\nthen overwritten by values in the ragged tensor. The default value must be\ncompatible with this broadcast operation, and must have fewer dimensions than\nthe value tensor.", + "name": "default_value", + "typeAttr": "T" + }, + { + "name": "row_partition_tensors", + "numberAttr": "num_row_partition_tensors", + "typeAttr": "Tindex" + } + ], + "outputs": [ + { + "description": "The resulting dense tensor.", + "name": "result", + "typeAttr": "T" + } + ], + "summary": "Create a dense tensor from a ragged tensor, possibly altering its shape." 
+ } + }, + { + "name": "RaggedTensorToVariant", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "RAGGED_RANK", + "type": "int64" + }, + { + "name": "Tvalues", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsplits", + "type": "type" + }, + { + "description": "A `bool` denoting whether the input is a batched `RaggedTensor`.", + "name": "batched_input", + "type": "boolean" + } + ], + "description": "\nEncodes the given `RaggedTensor` and returns a `variant` Tensor. If\n`batched_input` is True, then input `RaggedTensor` is unbatched along the\nzero-th dimension, each component `RaggedTensor` is encoded into a scalar\n`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.\nIf `batched_input` is False, then the input `RaggedTensor` is encoded as is and\na scalar `variant` Tensor is returned. A `RaggedTensor` is encoded by first\ncreating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the\nsplits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor\nis wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the\ncorresponding decoding logic.\n", + "inputs": [ + { + "description": "A list of one or more Tensors representing the splits of the input\n`RaggedTensor`.", + "name": "rt_nested_splits", + "numberAttr": "RAGGED_RANK", + "typeAttr": "Tsplits" + }, + { + "description": "A Tensor representing the values of the input `RaggedTensor`.", + "name": "rt_dense_values", + "typeAttr": "Tvalues" + } + ], + "outputs": [ + { + "description": "A `variant` Tensor that containing encoded `RaggedTensor`.", + "name": "encoded_ragged", + "type": 21 + } + ], + "summary": "Encodes a `RaggedTensor` into a `variant` Tensor." 
+ } + }, + { + "name": "RandomCrop", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "`size` is a 1-D int64 tensor with 2 elements representing the crop height and\nwidth. The values must be non negative.\n\nThis Op picks a random location in `image` and crops a `height` by `width`\nrectangle from that location. The random location is picked so the cropped\narea will fit inside the original image.", + "inputs": [ + { + "description": "3-D of shape `[height, width, channels]`.", + "name": "image", + "typeAttr": "T" + }, + { + "description": "1-D of length 2 containing: `crop_height`, `crop_width`..", + "name": "size", + "type": 9 + } + ], + "outputs": [ + { + "description": "3-D of shape `[crop_height, crop_width, channels].`", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Randomly crop `image`." + } + }, + { + "name": "RandomDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "Creates a Dataset that returns a stream of uniformly distributed\npseudorandom 64-bit signed integers.\n\nIn the TensorFlow Python API, you can instantiate this dataset via the\nclass `tf.data.experimental.RandomDataset`.\n\nInstances of this dataset are also created as a result of the\n`hoist_random_uniform` static optimization. 
Whether this optimization is\nperformed is determined by the `experimental_optimization.hoist_random_uniform`\noption of `tf.data.Options`.", + "inputs": [ + { + "description": "A scalar seed for the random number generator. If either seed or\nseed2 is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "name": "seed", + "type": 9 + }, + { + "description": "A second scalar seed to avoid seed collision.", + "name": "seed2", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a Dataset that returns pseudorandom numbers." + } + }, + { + "name": "RandomGamma", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "S", + "type": "type" + }, + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "This op uses the algorithm by Marsaglia et al. to acquire samples via\ntransformation-rejection from pairs of uniform and normal random variables.\nSee http://dl.acm.org/citation.cfm?id=358414", + "inputs": [ + { + "description": "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in alpha.", + "name": "shape", + "typeAttr": "S" + }, + { + "description": "A tensor in which each scalar is a \"shape\" parameter describing the\nassociated gamma distribution.", + "name": "alpha", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A tensor with shape `shape + shape(alpha)`. 
Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Outputs random values from the Gamma distribution(s) described by alpha." + } + }, + { + "name": "RandomGammaGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "alpha", + "typeAttr": "T" + }, + { + "name": "sample", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the derivative of a Gamma random sample w.r.t. `alpha`." + } + }, + { + "name": "RandomPoisson", + "schema": { + "attributes": [ + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "S", + "type": "type" + }, + { + "description": "Must be one of the following: `float16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "shape", + "typeAttr": "S" + }, + { + "name": "rate", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Use RandomPoissonV2 instead." + } + }, + { + "name": "RandomPoissonV2", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. 
Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "S", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 2 + }, + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "R", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "This op uses two algorithms, depending on rate. If rate >= 10, then\nthe algorithm by Hormann is used to acquire samples via\ntransformation-rejection.\nSee http://www.sciencedirect.com/science/article/pii/0167668793909974.\n\nOtherwise, Knuth's algorithm is used to acquire samples via multiplying uniform\nrandom variables.\nSee Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer\nProgramming, Volume 2. Addison Wesley", + "inputs": [ + { + "description": "1-D integer tensor. Shape of independent samples to draw from each\ndistribution described by the shape parameters given in rate.", + "name": "shape", + "typeAttr": "S" + }, + { + "description": "A tensor in which each scalar is a \"rate\" parameter describing the\nassociated poisson distribution.", + "name": "rate", + "typeAttr": "R" + } + ], + "outputs": [ + { + "description": "A tensor with shape `shape + shape(rate)`. Each slice\n`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for\n`rate[i0, i1, ...iN]`.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from the Poisson distribution(s) described by rate." 
+ } + }, + { + "name": "RandomShuffle", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": " The tensor is shuffled along dimension 0, such that each `value[j]` is mapped\n to one and only one `output[i]`. For example, a mapping that might occur for a\n 3x2 tensor is:\n\n```\n[[1, 2], [[5, 6],\n [3, 4], ==> [1, 2],\n [5, 6]] [3, 4]]\n```", + "inputs": [ + { + "description": "The tensor to be shuffled.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A tensor of same shape and type as `value`, shuffled along its first\ndimension.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Randomly shuffles a tensor along its first dimension." + } + }, + { + "name": "RandomShuffleQueue", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": [], + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "description": "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. 
This\nensures a minimum level of mixing of elements.", + "name": "min_after_dequeue", + "type": "int64" + }, + { + "default": 0, + "description": "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to the queue.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "A queue that randomizes the order of elements." + } + }, + { + "name": "RandomShuffleQueueV2", + "schema": { + "attributes": [ + { + "description": "The type of each component in a value.", + "minimum": 1, + "name": "component_types", + "type": "type[]" + }, + { + "default": [], + "description": "The shape of each component in a value. The length of this attr must\nbe either 0 or the same as the length of component_types. If the length of\nthis attr is 0, the shapes of queue elements are not constrained, and\nonly one element may be dequeued at a time.", + "minimum": 0, + "name": "shapes", + "type": "shape[]" + }, + { + "default": -1, + "description": "The upper bound on the number of elements in this queue.\nNegative numbers mean no limit.", + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "description": "Dequeue will block unless there would be this\nmany elements after the dequeue or the queue is closed. 
This\nensures a minimum level of mixing of elements.", + "name": "min_after_dequeue", + "type": "int64" + }, + { + "default": 0, + "description": "If either seed or seed2 is set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, a random seed is used.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this queue will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to the queue.", + "name": "handle", + "type": 20 + } + ], + "summary": "A queue that randomizes the order of elements." + } + }, + { + "name": "RandomStandardNormal", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "The type of the output. 
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "The generated values will have mean 0 and standard deviation 1.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A tensor of the specified shape filled with random normal values.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a normal distribution." + } + }, + { + "name": "RandomUniform", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "The generated values follow a uniform distribution in the range `[0, 1)`. The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A tensor of the specified shape filled with uniform random values.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a uniform distribution." 
+ } + }, + { + "name": "RandomUniformInt", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tout", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "The generated values are uniform integers in the range `[minval, maxval)`.\nThe lower bound `minval` is included in the range, while the upper bound\n`maxval` is excluded.\n\nThe random integers are slightly biased unless `maxval - minval` is an exact\npower of two. The bias is small for values of `maxval - minval` significantly\nsmaller than the range of the output (either `2^32` or `2^64`).", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "0-D. Inclusive lower bound on the generated integers.", + "name": "minval", + "typeAttr": "Tout" + }, + { + "description": "0-D. Exclusive upper bound on the generated integers.", + "name": "maxval", + "typeAttr": "Tout" + } + ], + "outputs": [ + { + "description": "A tensor of the specified shape filled with uniform random integers.", + "name": "output", + "typeAttr": "Tout" + } + ], + "summary": "Outputs random integers from a uniform distribution." 
+ } + }, + { + "name": "Range", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "This operation creates a sequence of numbers that begins at `start` and\nextends by increments of `delta` up to but not including `limit`.\n\nFor example:\n\n```\n# 'start' is 3\n# 'limit' is 18\n# 'delta' is 3\ntf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]\n```", + "inputs": [ + { + "description": "0-D (scalar). First entry in the sequence.", + "name": "start", + "typeAttr": "Tidx" + }, + { + "description": "0-D (scalar). Upper limit of sequence, exclusive.", + "name": "limit", + "typeAttr": "Tidx" + }, + { + "description": "0-D (scalar). Optional. Default is 1. Number that increments `start`.", + "name": "delta", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "1-D.", + "name": "output", + "typeAttr": "Tidx" + } + ], + "summary": "Creates a sequence of numbers." + } + }, + { + "name": "RangeDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "corresponds to start in python's xrange().", + "name": "start", + "type": 9 + }, + { + "description": "corresponds to stop in python's xrange().", + "name": "stop", + "type": 9 + }, + { + "description": "corresponds to step in python's xrange().", + "name": "step", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset with a range of values. Corresponds to python's xrange." 
+ } + }, + { + "name": "Rank", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "This operation returns an integer representing the rank of `input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\n# shape of tensor 't' is [2, 2, 3]\nrank(t) ==> 3\n```\n\n**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank\nof a tensor is the number of indices required to uniquely select each element\nof the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 3 + } + ], + "summary": "Returns the rank of a tensor." + } + }, + { + "name": "ReadFile", + "schema": { + "inputs": [ + { + "name": "filename", + "type": 7 + } + ], + "outputs": [ + { + "name": "contents", + "type": 7 + } + ], + "summary": "Reads and outputs the entire contents of the input filename." + } + }, + { + "name": "ReadVariableOp", + "schema": { + "attributes": [ + { + "description": "the dtype of the value.", + "name": "dtype", + "type": "type" + } + ], + "description": "The tensor returned by this operation is immutable.\n\nThe value returned by this operation is guaranteed to be influenced by all the\nwrites on which this operation depends directly or indirectly, and to not be\ninfluenced by any of the writes which depend directly or indirectly on this\noperation.", + "inputs": [ + { + "description": "handle to the resource in which to store the variable.", + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Reads the value of a variable." 
+ } + }, + { + "name": "ReaderNumRecordsProduced", + "schema": { + "description": "This is the same as the number of ReaderRead executions that have\nsucceeded.", + "inputs": [ + { + "description": "Handle to a Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "outputs": [ + { + "name": "records_produced", + "type": 9 + } + ], + "summary": "Returns the number of records this Reader has produced." + } + }, + { + "name": "ReaderNumRecordsProducedV2", + "schema": { + "description": "This is the same as the number of ReaderRead executions that have\nsucceeded.", + "inputs": [ + { + "description": "Handle to a Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "outputs": [ + { + "name": "records_produced", + "type": 9 + } + ], + "summary": "Returns the number of records this Reader has produced." + } + }, + { + "name": "ReaderNumWorkUnitsCompleted", + "schema": { + "inputs": [ + { + "description": "Handle to a Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "outputs": [ + { + "name": "units_completed", + "type": 9 + } + ], + "summary": "Returns the number of work units this Reader has finished processing." + } + }, + { + "name": "ReaderNumWorkUnitsCompletedV2", + "schema": { + "inputs": [ + { + "description": "Handle to a Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "outputs": [ + { + "name": "units_completed", + "type": 9 + } + ], + "summary": "Returns the number of work units this Reader has finished processing." + } + }, + { + "name": "ReaderRead", + "schema": { + "description": "Will dequeue from the input queue if necessary (e.g. 
when the\nReader needs to start reading from a new file since it has finished\nwith the previous file).", + "inputs": [ + { + "description": "Handle to a Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + }, + { + "description": "Handle to a Queue, with string work items.", + "isRef": true, + "name": "queue_handle", + "type": 7 + } + ], + "outputs": [ + { + "description": "A scalar.", + "name": "key", + "type": 7 + }, + { + "description": "A scalar.", + "name": "value", + "type": 7 + } + ], + "summary": "Returns the next record (key, value pair) produced by a Reader." + } + }, + { + "name": "ReaderReadUpTo", + "schema": { + "description": "Will dequeue from the input queue if necessary (e.g. when the\nReader needs to start reading from a new file since it has finished\nwith the previous file).\nIt may return less than `num_records` even before the last batch.", + "inputs": [ + { + "description": "Handle to a `Reader`.", + "isRef": true, + "name": "reader_handle", + "type": 7 + }, + { + "description": "Handle to a `Queue`, with string work items.", + "isRef": true, + "name": "queue_handle", + "type": 7 + }, + { + "description": "number of records to read from `Reader`.", + "name": "num_records", + "type": 9 + } + ], + "outputs": [ + { + "description": "A 1-D tensor.", + "name": "keys", + "type": 7 + }, + { + "description": "A 1-D tensor.", + "name": "values", + "type": 7 + } + ], + "summary": "Returns up to `num_records` (key, value) pairs produced by a Reader." + } + }, + { + "name": "ReaderReadUpToV2", + "schema": { + "description": "Will dequeue from the input queue if necessary (e.g. 
when the\nReader needs to start reading from a new file since it has finished\nwith the previous file).\nIt may return less than `num_records` even before the last batch.", + "inputs": [ + { + "description": "Handle to a `Reader`.", + "name": "reader_handle", + "type": 20 + }, + { + "description": "Handle to a `Queue`, with string work items.", + "name": "queue_handle", + "type": 20 + }, + { + "description": "number of records to read from `Reader`.", + "name": "num_records", + "type": 9 + } + ], + "outputs": [ + { + "description": "A 1-D tensor.", + "name": "keys", + "type": 7 + }, + { + "description": "A 1-D tensor.", + "name": "values", + "type": 7 + } + ], + "summary": "Returns up to `num_records` (key, value) pairs produced by a Reader." + } + }, + { + "name": "ReaderReadV2", + "schema": { + "description": "Will dequeue from the input queue if necessary (e.g. when the\nReader needs to start reading from a new file since it has finished\nwith the previous file).", + "inputs": [ + { + "description": "Handle to a Reader.", + "name": "reader_handle", + "type": 20 + }, + { + "description": "Handle to a Queue, with string work items.", + "name": "queue_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "A scalar.", + "name": "key", + "type": 7 + }, + { + "description": "A scalar.", + "name": "value", + "type": 7 + } + ], + "summary": "Returns the next record (key, value pair) produced by a Reader." + } + }, + { + "name": "ReaderReset", + "schema": { + "inputs": [ + { + "description": "Handle to a Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "summary": "Restore a Reader to its initial clean state." + } + }, + { + "name": "ReaderResetV2", + "schema": { + "inputs": [ + { + "description": "Handle to a Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "summary": "Restore a Reader to its initial clean state." 
+ } + }, + { + "name": "ReaderRestoreState", + "schema": { + "description": "Not all Readers support being restored, so this can produce an\nUnimplemented error.", + "inputs": [ + { + "description": "Handle to a Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + }, + { + "description": "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle.", + "name": "state", + "type": 7 + } + ], + "summary": "Restore a reader to a previously saved state." + } + }, + { + "name": "ReaderRestoreStateV2", + "schema": { + "description": "Not all Readers support being restored, so this can produce an\nUnimplemented error.", + "inputs": [ + { + "description": "Handle to a Reader.", + "name": "reader_handle", + "type": 20 + }, + { + "description": "Result of a ReaderSerializeState of a Reader with type\nmatching reader_handle.", + "name": "state", + "type": 7 + } + ], + "summary": "Restore a reader to a previously saved state." + } + }, + { + "name": "ReaderSerializeState", + "schema": { + "description": "Not all Readers support being serialized, so this can produce an\nUnimplemented error.", + "inputs": [ + { + "description": "Handle to a Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "outputs": [ + { + "name": "state", + "type": 7 + } + ], + "summary": "Produce a string tensor that encodes the state of a Reader." + } + }, + { + "name": "ReaderSerializeStateV2", + "schema": { + "description": "Not all Readers support being serialized, so this can produce an\nUnimplemented error.", + "inputs": [ + { + "description": "Handle to a Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "outputs": [ + { + "name": "state", + "type": 7 + } + ], + "summary": "Produce a string tensor that encodes the state of a Reader." 
+ } + }, + { + "name": "Real", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 8 + }, + "description": "Must be one of the following: `complex64`, `complex128`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`.", + "name": "Tout", + "type": "type" + } + ], + "description": "Given a tensor `input` of complex numbers, this operation returns a tensor of\ntype `float` that is the real part of each element in `input`. All elements in\n`input` must be complex numbers of the form \\\\(a + bj\\\\), where *a* is the real\n part returned by this operation and *b* is the imaginary part.\n\nFor example:\n\n```\n# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]\ntf.real(input) ==> [-2.25, 3.25]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "Tout" + } + ], + "summary": "Returns the real part of a complex number." + } + }, + { + "name": "RealDiv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "If `x` and `y` are reals, this will return the floating-point division.\n\n*NOTE*: `Div` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x / y element-wise for real types." 
+ } + }, + { + "name": "RebatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "use_fallback", + "type": "boolean" + } + ], + "description": "Creates a dataset that changes the batch size of the dataset to current batch\nsize // num_workers.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of replicas to distribute this batch across. As\na result of this transformation the current batch size would end up being\ndivided by this parameter.", + "name": "num_replicas", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that changes the batch size." + } + }, + { + "name": "Reciprocal", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = 1 / x\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes the reciprocal of x element-wise." + } + }, + { + "name": "ReciprocalGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`\nis the corresponding input gradient.", + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient for the inverse of `x` wrt its input." 
+ } + }, + { + "name": "RecordInput", + "schema": { + "attributes": [ + { + "description": "Glob pattern for the data files.", + "name": "file_pattern", + "type": "string" + }, + { + "default": 301, + "description": "Random seeds used to produce randomized records.", + "name": "file_random_seed", + "type": "int64" + }, + { + "default": 0.0, + "description": "Shifts the list of files after the list is randomly\nshuffled.", + "name": "file_shuffle_shift_ratio", + "type": "float32" + }, + { + "default": 10000, + "description": "The randomization shuffling buffer.", + "name": "file_buffer_size", + "type": "int64" + }, + { + "default": 16, + "description": "How many sstables are opened and concurrently iterated over.", + "name": "file_parallelism", + "type": "int64" + }, + { + "default": 32, + "description": "The batch size.", + "name": "batch_size", + "type": "int64" + }, + { + "default": "", + "description": "The type of compression for the file. Currently ZLIB and\nGZIP are supported. Defaults to none.", + "name": "compression_type", + "type": "string" + } + ], + "outputs": [ + { + "description": "A tensor of shape [batch_size].", + "name": "records", + "type": 7 + } + ], + "summary": "Emits randomized records." 
+ } + }, + { + "name": "Recv", + "schema": { + "attributes": [ + { + "name": "tensor_type", + "type": "type" + }, + { + "description": "The name of the tensor to receive.", + "name": "tensor_name", + "type": "string" + }, + { + "description": "The name of the device sending the tensor.", + "name": "send_device", + "type": "string" + }, + { + "description": "The current incarnation of send_device.", + "name": "send_device_incarnation", + "type": "int64" + }, + { + "description": "The name of the device receiving the tensor.", + "name": "recv_device", + "type": "string" + }, + { + "default": false, + "description": "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller.", + "name": "client_terminated", + "type": "boolean" + } + ], + "outputs": [ + { + "description": "The tensor to receive.", + "name": "tensor", + "typeAttr": "tensor_type" + } + ], + "summary": "Receives the named tensor from send_device on recv_device." + } + }, + { + "name": "RecvTPUEmbeddingActivations", + "schema": { + "attributes": [ + { + "description": "The number of output activation tensors, equal to the number of\nembedding tables in the model.", + "minimum": 1, + "name": "num_outputs", + "type": "int64" + }, + { + "description": "Serialized TPUEmbeddingConfiguration proto.", + "name": "config", + "type": "string" + } + ], + "description": "The TPU system performs the embedding lookups and aggregations specified by\nthe arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The\nresults of these aggregations are visible to the Tensorflow Graph as the\noutputs of a RecvTPUEmbeddingActivations op. This op returns a list containing\none Tensor of activations per table specified in the model. 
There can be at\nmost one RecvTPUEmbeddingActivations op in the TPU graph.", + "outputs": [ + { + "description": "A TensorList of embedding activations containing one Tensor per\nembedding table in the model.", + "name": "outputs", + "numberAttr": "num_outputs", + "type": 1 + } + ], + "summary": "An op that receives embedding activations on the TPU." + } + }, + { + "name": "ReduceDataset", + "schema": { + "attributes": [ + { + "description": "A function that maps `(old_state, input_element)` to `new_state`. It must take\ntwo arguments and return a nested structures of tensors. The structure of\n`new_state` must match the structure of `initial_state`.", + "name": "f", + "type": "function" + }, + { + "minimum": 1, + "name": "Tstate", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "use_inter_op_parallelism", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "A nested structure of tensors, representing the initial state of the\ntransformation.", + "name": "initial_state", + "typeListAttr": "Tstate" + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ], + "summary": "Reduces the input dataset to a singleton using a reduce function." 
+ } + }, + { + "name": "ReduceJoin", + "schema": { + "attributes": [ + { + "default": false, + "description": "If `True`, retain reduced dimensions with length `1`.", + "name": "keep_dims", + "type": "boolean" + }, + { + "default": "", + "description": "The separator to use when joining.", + "name": "separator", + "type": "string" + } + ], + "description": "Computes the string join across dimensions in the given string Tensor of shape\n`[\\\\(d_0, d_1, ..., d_{n-1}\\\\)]`. Returns a new Tensor created by joining the input\nstrings with the given separator (default: empty string). Negative indices are\ncounted backwards from the end, with `-1` being equivalent to `n - 1`. If\nindices are not specified, joins across all dimensions beginning from `n - 1`\nthrough `0`.\n\nFor example:\n\n```python\n# tensor `a` is [[\"a\", \"b\"], [\"c\", \"d\"]]\ntf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\ntf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\ntf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> [\"ac\", \"bd\"]\ntf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> [\"ab\", \"cd\"]\ntf.reduce_join(a, 0, keep_dims=True) ==> [[\"ac\", \"bd\"]]\ntf.reduce_join(a, 1, keep_dims=True) ==> [[\"ab\"], [\"cd\"]]\ntf.reduce_join(a, 0, separator=\".\") ==> [\"a.c\", \"b.d\"]\ntf.reduce_join(a, [0, 1]) ==> \"acbd\"\ntf.reduce_join(a, [1, 0]) ==> \"abcd\"\ntf.reduce_join(a, []) ==> [[\"a\", \"b\"], [\"c\", \"d\"]]\ntf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> \"abcd\"\n```", + "inputs": [ + { + "description": "The input to be joined. All reduced indices must have non-zero size.", + "name": "inputs", + "type": 7 + }, + { + "description": "The dimensions to reduce over. Dimensions are reduced in the\norder specified. Omitting `reduction_indices` is equivalent to passing\n`[n-1, n-2, ..., 0]`. 
Negative indices from `-n` to `-1` are supported.", + "name": "reduction_indices", + "type": 3 + } + ], + "outputs": [ + { + "description": "Has shape equal to that of the input with reduced dimensions removed or\nset to `1` depending on `keep_dims`.", + "name": "output", + "type": 7 + } + ], + "summary": "Joins a string Tensor across the given dimensions." + } + }, + { + "name": "RefEnter", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "The name of the child frame.", + "name": "frame_name", + "type": "string" + }, + { + "default": false, + "description": "If true, the output is constant within the child frame.", + "name": "is_constant", + "type": "boolean" + }, + { + "default": 10, + "description": "The number of iterations allowed to run in parallel.", + "name": "parallel_iterations", + "type": "int64" + } + ], + "description": "The unique `frame_name` is used by the `Executor` to identify frames. If\n`is_constant` is true, `output` is a constant in the child frame; otherwise\nit may be changed in the child frame. At most `parallel_iterations` iterations\nare run in parallel in the child frame.", + "inputs": [ + { + "description": "The tensor to be made available to the child frame.", + "isRef": true, + "name": "data", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The same tensor as `data`.", + "isRef": true, + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Creates or finds a child frame, and makes `data` available to the child frame." 
+ } + }, + { + "name": "RefExit", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Exit makes its input `data` available to the parent frame.", + "inputs": [ + { + "description": "The tensor to be made available to the parent frame.", + "isRef": true, + "name": "data", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The same tensor as `data`.", + "isRef": true, + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Exits the current frame to its parent frame." + } + }, + { + "name": "RefIdentity", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "isRef": true, + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "isRef": true, + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Return the same ref tensor as the input ref tensor." + } + }, + { + "name": "RefMerge", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "description": "`Merge` waits for at least one of the tensors in `inputs` to become available.\nIt is usually combined with `Switch` to implement branching.\n\n`Merge` forwards the first tensor for become available to `output`, and sets\n`value_index` to its index in `inputs`.", + "inputs": [ + { + "description": "The input tensors, exactly one of which will become available.", + "isRef": true, + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Will be set to the available input tensor.", + "isRef": true, + "name": "output", + "typeAttr": "T" + }, + { + "description": "The index of the chosen input tensor in `inputs`.", + "name": "value_index", + "type": 3 + } + ], + "summary": "Forwards the value of an available tensor from `inputs` to `output`." 
+ } + }, + { + "name": "RefNextIteration", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The tensor to be made available to the next iteration.", + "isRef": true, + "name": "data", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The same tensor as `data`.", + "isRef": true, + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Makes its input available to the next iteration." + } + }, + { + "name": "RefSelect", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A scalar that determines the input that gets selected.", + "name": "index", + "type": 3 + }, + { + "description": "A list of ref tensors, one of which will be forwarded to `output`.", + "isRef": true, + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The forwarded tensor.", + "isRef": true, + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Forwards the `index`th element of `inputs` to `output`." + } + }, + { + "name": "RefSwitch", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "If `pred` is true, the `data` input is forwarded to `output_true`. 
Otherwise,\nthe data goes to `output_false`.\n\nSee also `Switch` and `Merge`.", + "inputs": [ + { + "description": "The ref tensor to be forwarded to the appropriate output.", + "isRef": true, + "name": "data", + "typeAttr": "T" + }, + { + "description": "A scalar that specifies which output port will receive data.", + "name": "pred", + "type": 10 + } + ], + "outputs": [ + { + "description": "If `pred` is false, data will be forwarded to this output.", + "isRef": true, + "name": "output_false", + "typeAttr": "T" + }, + { + "description": "If `pred` is true, data will be forwarded to this output.", + "isRef": true, + "name": "output_true", + "typeAttr": "T" + } + ], + "summary": "Forwards the ref tensor `data` to the output port determined by `pred`." + } + }, + { + "name": "RegexFullMatch", + "schema": { + "description": "The input is a string tensor of any shape. The pattern is a scalar\nstring tensor which is applied to every element of the input tensor.\nThe boolean values (True or False) of the output tensor indicate\nif the input matches the regex pattern provided.\n\nThe pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)\n\nExamples:\n\n>>> tf.strings.regex_full_match([\"TF lib\", \"lib TF\"], \".*lib$\")\n\n>>> tf.strings.regex_full_match([\"TF lib\", \"lib TF\"], \".*TF$\")\n", + "inputs": [ + { + "description": "A string tensor of the text to be processed.", + "name": "input", + "type": 7 + }, + { + "description": "A scalar string tensor containing the regular expression to match the input.", + "name": "pattern", + "type": 7 + } + ], + "outputs": [ + { + "description": "A bool tensor with the same shape as `input`.", + "name": "output", + "type": 10 + } + ], + "summary": "Check if the input matches the regex pattern." 
+ } + }, + { + "name": "RegexReplace", + "schema": { + "attributes": [ + { + "default": true, + "description": "If True, the replacement is global (that is, all matches of the `pattern` regular\nexpression in each input string are rewritten), otherwise the `rewrite`\nsubstitution is only made for the first `pattern` match.", + "name": "replace_global", + "type": "boolean" + } + ], + "description": "It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)", + "inputs": [ + { + "description": "The text to be processed.", + "name": "input", + "type": 7 + }, + { + "description": "The regular expression to be matched in the `input` strings.", + "name": "pattern", + "type": 7 + }, + { + "description": "The rewrite string to be substituted for the `pattern` expression where it is\nmatched in the `input` strings.", + "name": "rewrite", + "type": 7 + } + ], + "outputs": [ + { + "description": "The text after applying pattern match and rewrite substitution.", + "name": "output", + "type": 7 + } + ], + "summary": "Replaces matches of the `pattern` regular expression in `input` with the\nreplacement string provided in `rewrite`." + } + }, + { + "name": "RegisterDataset", + "schema": { + "attributes": [ + { + "name": "external_state_policy", + "type": "int64" + } + ], + "inputs": [ + { + "name": "dataset", + "type": 21 + }, + { + "name": "address", + "type": 7 + }, + { + "name": "protocol", + "type": 7 + } + ], + "outputs": [ + { + "name": "dataset_id", + "type": 9 + } + ], + "summary": "Registers a dataset with the tf.data service." 
+ } + }, + { + "name": "Relu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`, `qint8`.", + "name": "T", + "type": "type" + } + ], + "category": "Activation", + "description": "See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)\nExample usage:\n>>> tf.nn.relu([-2., 0., -0., 3.]).numpy()\narray([ 0., 0., -0., 3.], dtype=float32)", + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ], + "summary": "Computes rectified linear: `max(features, 0)`." + } + }, + { + "name": "Relu6", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "category": "Activation", + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ], + "summary": "Computes rectified linear 6: `min(max(features, 0), 6)`." 
+ } + }, + { + "name": "Relu6Grad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The backpropagated gradients to the corresponding Relu6 operation.", + "name": "gradients", + "typeAttr": "T" + }, + { + "description": "The features passed as input to the corresponding Relu6 operation, or\nits output; using either one produces the same result.", + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradients:\n`gradients * (features > 0) * (features < 6)`.", + "name": "backprops", + "typeAttr": "T" + } + ], + "summary": "Computes rectified linear 6 gradients for a Relu6 operation." + } + }, + { + "name": "ReluGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The backpropagated gradients to the corresponding Relu operation.", + "name": "gradients", + "typeAttr": "T" + }, + { + "description": "The features passed as input to the corresponding Relu operation, OR\nthe outputs of that operation (both work equivalently).", + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "`gradients * (features > 0)`.", + "name": "backprops", + "typeAttr": "T" + } + ], + "summary": "Computes rectified linear gradients for a Relu operation." 
+ } + }, + { + "name": "RemoteCall", + "schema": { + "attributes": [ + { + "description": "The type list for the arguments.", + "minimum": 1, + "name": "Tin", + "type": "type[]" + }, + { + "description": "The type list for the return values.", + "minimum": 1, + "name": "Tout", + "type": "type[]" + }, + { + "description": "The function to run remotely.", + "name": "f", + "type": "function" + } + ], + "inputs": [ + { + "description": "A fully specified device name where we want to run the function.", + "name": "target", + "type": 7 + }, + { + "description": "A list of arguments for the function.", + "name": "args", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "A list of return values.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "Runs function `f` on a remote device indicated by `target`." + } + }, + { + "name": "RemoteFusedGraphExecute", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "Tinputs", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Toutputs", + "type": "type[]" + }, + { + "description": "Serialized protocol buffer\nof RemoteFusedGraphExecuteInfo which contains graph specifications.", + "name": "serialized_remote_fused_graph_execute_info", + "type": "string" + } + ], + "description": "The graph specifications(such as graph itself, input tensors and output names)\nare stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo\nas serialized_remote_fused_graph_execute_info.\nThe specifications will be passed to a dedicated registered\nremote fused graph executor. The executor will send the graph specifications\nto a remote processor and execute that graph. 
The execution results\nwill be passed to consumer nodes as outputs of this node.", + "inputs": [ + { + "description": "Arbitrary number of tensors with arbitrary data types", + "name": "inputs", + "typeListAttr": "Tinputs" + } + ], + "outputs": [ + { + "description": "Arbitrary number of tensors with arbitrary data types", + "name": "outputs", + "typeListAttr": "Toutputs" + } + ], + "summary": "Execute a sub graph on a remote processor." + } + }, + { + "name": "RepeatDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of times that `input_dataset` should\nbe repeated. A value of `-1` indicates that it should be repeated infinitely.", + "name": "count", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that emits the outputs of `input_dataset` `count` times." + } + }, + { + "name": "RequantizationRange", + "schema": { + "attributes": [ + { + "description": "The type of the input. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + } + ], + "description": "Given a quantized tensor described by `(input, input_min, input_max)`, outputs a\nrange that covers the actual values present in that tensor. 
This op is typically\nused to produce the `requested_output_min` and `requested_output_max` for\n`Requantize`.", + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The float value that the minimum quantized input value represents.", + "name": "input_min", + "type": 1 + }, + { + "description": "The float value that the maximum quantized input value represents.", + "name": "input_max", + "type": 1 + } + ], + "outputs": [ + { + "description": "The computed min output.", + "name": "output_min", + "type": 1 + }, + { + "description": "the computed max output.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Computes a range that covers the actual values present in a quantized tensor." + } + }, + { + "name": "RequantizationRangePerChannel", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 13 + }, + "description": "The quantized type of input tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T", + "type": "type" + }, + { + "description": "The maximum value of the output that needs to be clipped.\nExample: set this to 6 for Relu6.", + "name": "clip_value_max", + "type": "float32" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The minimum value of the input tensor", + "name": "input_min", + "type": 1 + }, + { + "description": "The maximum value of the input tensor.", + "name": "input_max", + "type": 1 + } + ], + "outputs": [ + { + "description": "The minimum value of the final output tensor", + "name": "output_min", + "type": 1 + }, + { + "description": "The maximum value of the final output tensor.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Computes requantization range per channel." + } + }, + { + "name": "Requantize", + "schema": { + "attributes": [ + { + "description": "The type of the input. 
Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "Tinput", + "type": "type" + }, + { + "description": "The type of the output. Should be a lower bit depth than Tinput. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + } + ], + "description": "Converts the quantized `input` tensor into a lower-precision `output`, using the\noutput range specified with `requested_output_min` and `requested_output_max`.\n\n`[input_min, input_max]` are scalar floats that specify the range for the float\ninterpretation of the `input` data. For example, if `input_min` is -1.0f and\n`input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0\nvalue in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.", + "inputs": [ + { + "name": "input", + "typeAttr": "Tinput" + }, + { + "description": "The float value that the minimum quantized input value represents.", + "name": "input_min", + "type": 1 + }, + { + "description": "The float value that the maximum quantized input value represents.", + "name": "input_max", + "type": 1 + }, + { + "description": "The float value that the minimum quantized output value represents.", + "name": "requested_output_min", + "type": 1 + }, + { + "description": "The float value that the maximum quantized output value represents.", + "name": "requested_output_max", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The requested_output_min value is copied into this output.", + "name": "output_min", + "type": 1 + }, + { + "description": "The requested_output_max value is copied into this output.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Converts the quantized `input` tensor into a lower-precision `output`." 
+ } + }, + { + "name": "RequantizePerChannel", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 13 + }, + "description": "The quantized type of input tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 12 + }, + "description": "The quantized type of output tensor that needs to be converted. Must be one of the following: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.", + "name": "out_type", + "type": "type" + } + ], + "inputs": [ + { + "description": "The original input tensor.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The minimum value of the input tensor", + "name": "input_min", + "type": 1 + }, + { + "description": "The maximum value of the input tensor.", + "name": "input_max", + "type": 1 + }, + { + "description": "The minimum value of the output tensor requested.", + "name": "requested_output_min", + "type": 1 + }, + { + "description": "The maximum value of the output tensor requested.", + "name": "requested_output_max", + "type": 1 + } + ], + "outputs": [ + { + "description": "Output tensor.", + "name": "output", + "typeAttr": "out_type" + }, + { + "description": "The minimum value of the final output tensor", + "name": "output_min", + "type": 1 + }, + { + "description": "The maximum value of the final output tensor.", + "name": "output_max", + "type": 1 + } + ], + "summary": "Requantizes input with min and max values known per channel." 
+ } + }, + { + "name": "Reshape", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tshape", + "type": "type" + } + ], + "category": "Shape", + "description": "Given `tensor`, this operation returns a tensor that has the same values\nas `tensor` with shape `shape`.\n\nIf one component of 1-D tensor `shape` is the special value -1, the size of that\ndimension is computed so that the total size remains constant. In particular, a\n`shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be\nunknown.\n\nThe `shape` must be 1-D and the operation returns a tensor with shape\n`shape` filled with the values of `tensor`. In this case, the number of elements\nimplied by `shape` must be the same as the number of elements in `tensor`.\n\nIt is an error if `shape` is not 1-D.\n\nFor example:\n\n```\n# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]\n# tensor 't' has shape [9]\nreshape(t, [3, 3]) ==> [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n\n# tensor 't' is [[[1, 1], [2, 2]],\n# [[3, 3], [4, 4]]]\n# tensor 't' has shape [2, 2, 2]\nreshape(t, [2, 4]) ==> [[1, 1, 2, 2],\n [3, 3, 4, 4]]\n\n# tensor 't' is [[[1, 1, 1],\n# [2, 2, 2]],\n# [[3, 3, 3],\n# [4, 4, 4]],\n# [[5, 5, 5],\n# [6, 6, 6]]]\n# tensor 't' has shape [3, 2, 3]\n# pass '[-1]' to flatten 't'\nreshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]\n\n# -1 can also be used to infer the shape\n\n# -1 is inferred to be 9:\nreshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\n# -1 is inferred to be 2:\nreshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],\n [4, 4, 4, 5, 5, 5, 6, 6, 6]]\n# -1 is inferred to be 3:\nreshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]],\n [[4, 4, 4],\n [5, 5, 5],\n [6, 6, 6]]]\n\n# tensor 't' is [7]\n# shape `[]` reshapes to a scalar\nreshape(t, []) ==> 7\n```", + 
"inputs": [ + { + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "Defines the shape of the output tensor.", + "name": "shape", + "typeAttr": "Tshape" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Reshapes a tensor." + } + }, + { + "name": "ResizeArea", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "name": "align_corners", + "type": "boolean" + } + ], + "description": "Input images can be of different types but output images are always float.\n\nThe range of pixel values for the output image might be slightly different\nfrom the range for the input image because of limited numerical precision.\nTo guarantee an output range, for example `[0.0, 1.0]`, apply\n`tf.clip_by_value` to the output.\n\nEach output pixel is computed by first transforming the pixel's footprint into\nthe input tensor and then averaging the pixels that intersect the footprint. An\ninput pixel's contribution to the average is weighted by the fraction of its\narea that intersects the footprint. This is the same as OpenCV's INTER_AREA.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "name": "resized_images", + "type": 1 + } + ], + "summary": "Resize `images` to `size` using area interpolation." 
+ } + }, + { + "name": "ResizeBicubic", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "name": "align_corners", + "type": "boolean" + }, + { + "default": false, + "name": "half_pixel_centers", + "type": "boolean" + } + ], + "description": "Input images can be of different types but output images are always float.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "name": "resized_images", + "type": 1 + } + ], + "summary": "Resize `images` to `size` using bicubic interpolation." + } + }, + { + "name": "ResizeBicubicGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and grad tensors are\naligned. 
Defaults to false.", + "name": "align_corners", + "type": "boolean" + }, + { + "default": false, + "name": "half_pixel_centers", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "grads", + "type": 1 + }, + { + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized.", + "name": "original_image", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of bicubic interpolation." + } + }, + { + "name": "ResizeBilinear", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `bfloat16`, `float16`, `float32`, `float64`, `bfloat16`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "name": "align_corners", + "type": "boolean" + }, + { + "default": false, + "name": "half_pixel_centers", + "type": "boolean" + } + ], + "description": "Input images can be of different types but output images are always float.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The\nnew size for the images.", + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "name": "resized_images", + "type": 1 + } + ], + "summary": "Resize `images` to `size` using bilinear interpolation." 
+ } + }, + { + "name": "ResizeBilinearGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `bfloat16`, `float16`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and grad tensors are\naligned. Defaults to false.", + "name": "align_corners", + "type": "boolean" + }, + { + "default": false, + "name": "half_pixel_centers", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "grads", + "type": 1 + }, + { + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`,\nThe image tensor that was resized.", + "name": "original_image", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`.\nGradients with respect to the input image. Input image must have been\nfloat or double.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of bilinear interpolation." + } + }, + { + "name": "ResizeNearestNeighbor", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and output tensors are\naligned, preserving the values at the corner pixels. Defaults to false.", + "name": "align_corners", + "type": "boolean" + }, + { + "default": false, + "name": "half_pixel_centers", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "images", + "typeAttr": "T" + }, + { + "description": "= A 1-D int32 Tensor of 2 elements: `new_height, new_width`. 
The\nnew size for the images.", + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "description": "4-D with shape\n`[batch, new_height, new_width, channels]`.", + "name": "resized_images", + "typeAttr": "T" + } + ], + "summary": "Resize `images` to `size` using nearest neighbor interpolation." + } + }, + { + "name": "ResizeNearestNeighborGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `int8`, `int32`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If true, the centers of the 4 corner pixels of the input and grad tensors are\naligned. Defaults to false.", + "name": "align_corners", + "type": "boolean" + }, + { + "default": false, + "name": "half_pixel_centers", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, channels]`.", + "name": "grads", + "typeAttr": "T" + }, + { + "description": "= A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The\noriginal input size.", + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "description": "4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients\nwith respect to the input image.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of nearest neighbor interpolation." + } + }, + { + "name": "ResourceAccumulatorApplyGradient", + "schema": { + "attributes": [ + { + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "Does not add if local_step is lesser than the accumulator's global_step.", + "inputs": [ + { + "description": "The handle to a accumulator.", + "name": "handle", + "type": 20 + }, + { + "description": "The local_step value at which the gradient was computed.", + "name": "local_step", + "type": 9 + }, + { + "description": "A tensor of the gradient to be accumulated.", + "name": "gradient", + "typeAttr": "dtype" + } + ], + "summary": "Applies a gradient to a given accumulator." + } + }, + { + "name": "ResourceAccumulatorNumAccumulated", + "schema": { + "inputs": [ + { + "description": "The handle to an accumulator.", + "name": "handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "The number of gradients aggregated in the given accumulator.", + "name": "num_accumulated", + "type": 3 + } + ], + "summary": "Returns the number of gradients aggregated in the given accumulators." + } + }, + { + "name": "ResourceAccumulatorSetGlobalStep", + "schema": { + "description": "Logs warning if the accumulator's value is already higher than\nnew_global_step.", + "inputs": [ + { + "description": "The handle to an accumulator.", + "name": "handle", + "type": 20 + }, + { + "description": "The new global_step value to set.", + "name": "new_global_step", + "type": 9 + } + ], + "summary": "Updates the accumulator with a new value for global_step." + } + }, + { + "name": "ResourceAccumulatorTakeGradient", + "schema": { + "attributes": [ + { + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "The op blocks until sufficient (i.e., more than num_required)\ngradients have been accumulated. If the accumulator has already\naggregated more than num_required gradients, it returns the average of\nthe accumulated gradients. Also automatically increments the recorded\nglobal_step in the accumulator by 1, and resets the aggregate to 0.", + "inputs": [ + { + "description": "The handle to an accumulator.", + "name": "handle", + "type": 20 + }, + { + "description": "Number of gradients required before we return an aggregate.", + "name": "num_required", + "type": 3 + } + ], + "outputs": [ + { + "description": "The average of the accumulated gradients.", + "name": "average", + "typeAttr": "dtype" + } + ], + "summary": "Extracts the average gradient in the given ConditionalAccumulator." 
+ } + }, + { + "name": "ResourceApplyAdaMax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nv_t <- max(beta2 * v_{t-1}, abs(g))\nvariable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "m", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "v", + "type": 20 + }, + { + "description": "Must be a scalar.", + "name": "beta1_power", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta1", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta2", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the AdaMax algorithm." 
+ } + }, + { + "name": "ResourceApplyAdadelta", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var, accum and update_accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "accum = rho() * accum + (1 - rho()) * grad.square();\nupdate = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;\nupdate_accum = rho() * update_accum + (1 - rho()) * update.square();\nvar -= update;", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum_update", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay factor. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "description": "Constant factor. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the adadelta scheme." 
+ } + }, + { + "name": "ResourceApplyAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "accum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the adagrad scheme." 
+ } + }, + { + "name": "ResourceApplyAdagradDA", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "gradient_accumulator", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "gradient_squared_accumulator", + "type": 20 + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Training step number. Must be a scalar.", + "name": "global_step", + "type": 9 + } + ], + "summary": "Update '*var' according to the proximal adagrad scheme." 
+ } + }, + { + "name": "ResourceApplyAdagradV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "accum += grad * grad\nvar -= lr * grad * (1 / (sqrt(accum) + epsilon))", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Constant factor. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the adagrad scheme." 
+ } + }, + { + "name": "ResourceApplyAdam", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, uses the nesterov update.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "$$\\text{lr}_t := \\mathrm{learning_rate} * \\sqrt{1 - \\beta_2^t} / (1 - \\beta_1^t)$$\n$$m_t := \\beta_1 * m_{t-1} + (1 - \\beta_1) * g$$\n$$v_t := \\beta_2 * v_{t-1} + (1 - \\beta_2) * g * g$$\n$$\\text{variable} := \\text{variable} - \\text{lr}_t * m_t / (\\sqrt{v_t} + \\epsilon)$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "m", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "v", + "type": 20 + }, + { + "description": "Must be a scalar.", + "name": "beta1_power", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta2_power", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta1", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta2", + "typeAttr": "T" + }, + { + "description": "Ridge term. 
Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the Adam algorithm." + } + }, + { + "name": "ResourceApplyAdamWithAmsgrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, m, and v tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "$$\\text{lr}_t := \\mathrm{learning_rate} * \\sqrt{1 - \\beta_2^t} / (1 - \\beta_1^t)$$\n$$m_t := \\beta_1 * m_{t-1} + (1 - \\beta_1) * g$$\n$$v_t := \\beta_2 * v_{t-1} + (1 - \\beta_2) * g * g$$\n$$\\hat{v}_t := max{\\hat{v}_{t-1}, v_t}$$\n$$\\text{variable} := \\text{variable} - \\text{lr}_t * m_t / (\\sqrt{\\hat{v}_t} + \\epsilon)$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "m", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "v", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "vhat", + "type": 20 + }, + { + "description": "Must be a scalar.", + "name": "beta1_power", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta2_power", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Momentum factor. Must be a scalar.", + "name": "beta1", + "typeAttr": "T" + }, + { + "description": "Momentum factor. 
Must be a scalar.", + "name": "beta2", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the Adam algorithm." + } + }, + { + "name": "ResourceApplyAddSign", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- (alpha + sign_decay * sign(g) *sign(m)) * g\nvariable <- variable - lr_t * update", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "m", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "sign_decay", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the AddSign update." 
+ } + }, + { + "name": "ResourceApplyCenteredRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\n\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nmg <- rho * mg_{t-1} + (1-rho) * grad\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)\nvar <- var - mom", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "mg", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "ms", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "mom", + "type": 20 + }, + { + "description": "Scaling factor. 
Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the centered RMSProp algorithm." + } + }, + { + "name": "ResourceApplyFtrl", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "accum_new = accum + grad * grad\nlinear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "linear", + "type": 20 + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. 
Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the Ftrl-proximal scheme." + } + }, + { + "name": "ResourceApplyFtrlV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "grad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad_with_shrinkage * grad_with_shrinkage\nlinear += grad_with_shrinkage +\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "linear", + "type": 20 + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 shrinkage regularization. 
Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the Ftrl-proximal scheme." + } + }, + { + "name": "ResourceApplyGradientDescent", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "The change.", + "name": "delta", + "typeAttr": "T" + } + ], + "summary": "Update '*var' by subtracting 'alpha' * 'delta' from it." 
+ } + }, + { + "name": "ResourceApplyKerasMomentum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, the tensor passed to compute grad will be\nvar + momentum * accum, so in the end, the var you get is actually\nvar + momentum * accum.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\naccum = accum * momentum - lr * grad\nvar += accum", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Momentum. Must be a scalar.", + "name": "momentum", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the momentum scheme." 
+ } + }, + { + "name": "ResourceApplyMomentum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\naccum = accum * momentum + grad\nvar -= lr * accum", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "Momentum. Must be a scalar.", + "name": "momentum", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the momentum scheme." 
+ } + }, + { + "name": "ResourceApplyPowerSign", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and m tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "m_t <- beta1 * m_{t-1} + (1 - beta1) * g\nupdate <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g\nvariable <- variable - lr_t * update", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "m", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "logbase", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "sign_decay", + "typeAttr": "T" + }, + { + "description": "Must be a scalar.", + "name": "beta", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the AddSign update." 
+ } + }, + { + "name": "ResourceApplyProximalAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "accum += grad * grad\nprox_v = var - lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' and '*accum' according to FOBOS with Adagrad learning rate." 
+ } + }, + { + "name": "ResourceApplyProximalGradientDescent", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "prox_v = var - alpha * delta\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The change.", + "name": "delta", + "typeAttr": "T" + } + ], + "summary": "Update '*var' as FOBOS algorithm with fixed learning rate." 
+ } + }, + { + "name": "ResourceApplyRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "ms", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "mom", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the RMSProp algorithm." + } + }, + { + "name": "ResourceConditionalAccumulator", + "schema": { + "attributes": [ + { + "description": "The type of the value being accumulated. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "The shape of the values, can be [], in which case shape is unknown.", + "name": "shape", + "type": "shape" + }, + { + "default": "", + "description": "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this accumulator will be shared under the\ngiven name across multiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": "MEAN", + "description": "Must be one of the following: `MEAN`, `SUM`.", + "name": "reduction_type", + "type": "string" + } + ], + "description": "The accumulator accepts gradients marked with local_step greater or\nequal to the most recent global_step known to the accumulator. The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator.\nThis is a resource version of ConditionalAccumulator that will work in TF2.0\nwith tf.cond version 2.", + "outputs": [ + { + "description": "The handle to the accumulator.", + "name": "handle", + "type": 20 + } + ], + "summary": "A conditional accumulator for aggregating gradients." 
+ } + }, + { + "name": "ResourceCountUpTo", + "schema": { + "attributes": [ + { + "description": "If incrementing ref would bring it above limit, instead generates an\n'OutOfRange' error.", + "name": "limit", + "type": "int64" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "Should be from a scalar `Variable` node.", + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "description": "A copy of the input before increment. If nothing else modifies the\ninput, the values produced will all be distinct.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Increments variable pointed to by 'resource' until it reaches 'limit'." + } + }, + { + "name": "ResourceGather", + "schema": { + "attributes": [ + { + "default": 0, + "name": "batch_dims", + "type": "int64" + }, + { + "default": true, + "name": "validate_indices", + "type": "boolean" + }, + { + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).\nProduces an output tensor with shape `indices.shape + params.shape[1:]` where:\n\n```python\n # Scalar indices\n output[:, ..., :] = params[indices, :, ... :]\n\n # Vector indices\n output[i, :, ..., :] = params[indices[i], :, ... :]\n\n # Higher rank indices\n output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]\n```", + "inputs": [ + { + "name": "resource", + "type": 20 + }, + { + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Gather slices from the variable pointed to by `resource` according to `indices`." 
+ } + }, + { + "name": "ResourceGatherNd", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "inputs": [ + { + "name": "resource", + "type": 20 + }, + { + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + } + }, + { + "name": "ResourceScatterAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] += updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] += updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "name": "resource", + "type": 20 + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "dtype" + } + ], + "summary": "Adds sparse updates to the variable referenced by `resource`." + } + }, + { + "name": "ResourceScatterDiv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] /= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] /= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "name": "resource", + "type": 20 + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "dtype" + } + ], + "summary": "Divides sparse updates into the variable referenced by `resource`." + } + }, + { + "name": "ResourceScatterMax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = max(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions are combined.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "name": "resource", + "type": 20 + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "dtype" + } + ], + "summary": "Reduces sparse updates into the variable referenced by `resource` using the `max` operation." + } + }, + { + "name": "ResourceScatterMin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = min(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions are combined.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "name": "resource", + "type": 20 + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "dtype" + } + ], + "summary": "Reduces sparse updates into the variable referenced by `resource` using the `min` operation." + } + }, + { + "name": "ResourceScatterMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] *= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] *= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "name": "resource", + "type": 20 + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "dtype" + } + ], + "summary": "Multiplies sparse updates into the variable referenced by `resource`." + } + }, + { + "name": "ResourceScatterNdAdd", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": true, + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to\n8 elements. 
In Python, that addition would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nadd = tf.scatter_nd_add(ref, indices, updates)\nwith tf.Session() as sess:\n print sess.run(add)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "inputs": [ + { + "description": "A resource handle. Must be from a VarHandleOp.", + "name": "ref", + "type": 20 + }, + { + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A Tensor. Must have the same type as ref. A tensor of\nvalues to add to ref.", + "name": "updates", + "typeAttr": "T" + } + ], + "summary": "Applies sparse addition to individual values or slices in a Variable." + } + }, + { + "name": "ResourceScatterNdSub", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": true, + "description": "An optional bool. Defaults to True. 
If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to subtract 4 scattered elements from a rank-1 tensor\nwith 8 elements. In Python, that subtraction would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nsub = tf.scatter_nd_sub(ref, indices, updates)\nwith tf.Session() as sess:\n print sess.run(sub)\n```\n\nThe resulting update to ref would look like this:\n\n [1, -9, 3, -6, -4, 6, 7, -4]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "inputs": [ + { + "description": "A resource handle. Must be from a VarHandleOp.", + "name": "ref", + "type": 20 + }, + { + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A Tensor. Must have the same type as ref. A tensor of\nvalues to add to ref.", + "name": "updates", + "typeAttr": "T" + } + ], + "summary": "Applies sparse subtraction to individual values or slices in a Variable." 
+ } + }, + { + "name": "ResourceScatterNdUpdate", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": true, + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to update 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that update would look like this:\n\n```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n update = tf.scatter_nd_update(ref, indices, updates)\n with tf.Session() as sess:\n print sess.run(update)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "inputs": [ + { + "description": "A resource handle. Must be from a VarHandleOp.", + "name": "ref", + "type": 20 + }, + { + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A Tensor. Must have the same type as ref. 
A tensor of updated\nvalues to add to ref.", + "name": "updates", + "typeAttr": "T" + } + ], + "summary": "Applies sparse `updates` to individual values or slices within a given" + } + }, + { + "name": "ResourceScatterSub", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] -= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] -= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "name": "resource", + "type": 20 + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "dtype" + } + ], + "summary": "Subtracts sparse updates from the variable referenced by `resource`." + } + }, + { + "name": "ResourceScatterUpdate", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] = updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "name": "resource", + "type": 20 + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "dtype" + } + ], + "summary": "Assigns sparse updates to the variable referenced by `resource`." 
+ } + }, + { + "name": "ResourceSparseApplyAdadelta", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": ": Should be from a Variable().", + "name": "accum_update", + "type": 20 + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay factor. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "description": "Constant factor. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "summary": "var: Should be from a Variable()." 
+ } + }, + { + "name": "ResourceSparseApplyAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme." 
+ } + }, + { + "name": "ResourceSparseApplyAdagradDA", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "gradient_accumulator", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "gradient_squared_accumulator", + "type": 20 + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Training step number. Must be a scalar.", + "name": "global_step", + "type": 9 + } + ], + "summary": "Update entries in '*var' and '*accum' according to the proximal adagrad scheme." 
+ } + }, + { + "name": "ResourceSparseApplyAdagradV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nvar -= lr * grad * (1 / sqrt(accum))", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Constant factor. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme." 
+ } + }, + { + "name": "ResourceSparseApplyCenteredRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "mg", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "ms", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "mom", + "type": 20 + }, + { + "description": "Scaling factor. 
Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var, ms and mom.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "summary": "Update '*var' according to the centered RMSProp algorithm." + } + }, + { + "name": "ResourceSparseApplyFtrl", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\naccum_new = accum + grad * grad\nlinear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Should be from a Variable().", + 
"name": "linear", + "type": 20 + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme." + } + }, + { + "name": "ResourceSparseApplyFtrlV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\ngrad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad_with_shrinkage * grad_with_shrinkage\nlinear += grad_with_shrinkage +\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + 
"inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "linear", + "type": 20 + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 shrinkage regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme." 
+ } + }, + { + "name": "ResourceSparseApplyKerasMomentum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, the tensor passed to compute grad will be\nvar + momentum * accum, so in the end, the var you get is actually\nvar + momentum * accum.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\naccum = accum * momentum - lr * grad\nvar += accum", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Momentum. Must be a scalar.", + "name": "momentum", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' and '*accum' according to the momentum scheme." 
+ } + }, + { + "name": "ResourceSparseApplyMomentum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\naccum = accum * momentum + grad\nvar -= lr * accum", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Momentum. Must be a scalar.", + "name": "momentum", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' and '*accum' according to the momentum scheme." 
+ } + }, + { + "name": "ResourceSparseApplyProximalAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var and accum as follows:\naccum += grad * grad\nprox_v = var\nprox_v -= lr * grad * (1 / sqrt(accum))\nvar = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "accum", + "type": 20 + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "summary": "Sparse update entries in '*var' and '*accum' according to FOBOS algorithm." 
+ } + }, + { + "name": "ResourceSparseApplyProximalGradientDescent", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var as follows:\nprox_v = var - alpha * grad\nvar = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "summary": "Sparse update '*var' as FOBOS algorithm with fixed learning rate." 
+ } + }, + { + "name": "ResourceSparseApplyRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\nms <- rho * ms_{t-1} + (1-rho) * grad * grad\nmom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)\nvar <- var - mom", + "inputs": [ + { + "description": "Should be from a Variable().", + "name": "var", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "ms", + "type": 20 + }, + { + "description": "Should be from a Variable().", + "name": "mom", + "type": 20 + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. 
Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var, ms and mom.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "summary": "Update '*var' according to the RMSProp algorithm." + } + }, + { + "name": "ResourceStridedSliceAssign", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Index", + "type": "type" + }, + { + "default": 0, + "name": "begin_mask", + "type": "int64" + }, + { + "default": 0, + "name": "end_mask", + "type": "int64" + }, + { + "default": 0, + "name": "ellipsis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "new_axis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "shrink_axis_mask", + "type": "int64" + } + ], + "description": "The values of `value` are assigned to the positions in the variable\n`ref` that are selected by the slice parameters. The slice parameters\n`begin, `end`, `strides`, etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`'s\nshape must be exactly the shape produced by the slice of `ref`.", + "inputs": [ + { + "name": "ref", + "type": 20 + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "value", + "typeAttr": "T" + } + ], + "summary": "Assign `value` to the sliced l-value reference of `ref`." 
+ } + }, + { + "name": "Restore", + "schema": { + "attributes": [ + { + "description": "The type of the tensor to be restored.", + "name": "dt", + "type": "type" + }, + { + "default": -1, + "description": "Index of file to open first if multiple files match\n`file_pattern`.", + "name": "preferred_shard", + "type": "int64" + } + ], + "description": "Reads a tensor stored in one or several files. If there are several files (for\ninstance because a tensor was saved as slices), `file_pattern` may contain\nwildcard symbols (`*` and `?`) in the filename portion only, not in the\ndirectory portion.\n\nIf a `file_pattern` matches several files, `preferred_shard` can be used to hint\nin which file the requested tensor is likely to be found. This op will first\nopen the file at index `preferred_shard` in the list of matching files and try\nto restore tensors from that file. Only if some tensors or tensor slices are\nnot found in that first file, then the Op opens all the files. Setting\n`preferred_shard` to match the value passed as the `shard` input\nof a matching `Save` Op may speed up Restore. This attribute only affects\nperformance, not correctness. The default value -1 means files are processed in\norder.\n\nSee also `RestoreSlice`.", + "inputs": [ + { + "description": "Must have a single element. The pattern of the files from\nwhich we read the tensor.", + "name": "file_pattern", + "type": 7 + }, + { + "description": "Must have a single element. The name of the tensor to be\nrestored.", + "name": "tensor_name", + "type": 7 + } + ], + "outputs": [ + { + "description": "The restored tensor.", + "name": "tensor", + "typeAttr": "dt" + } + ], + "summary": "Restores a tensor from checkpoint files." + } + }, + { + "name": "RestoreSlice", + "schema": { + "attributes": [ + { + "description": "The type of the tensor to be restored.", + "name": "dt", + "type": "type" + }, + { + "default": -1, + "description": "Index of file to open first if multiple files match\n`file_pattern`. 
See the documentation for `Restore`.", + "name": "preferred_shard", + "type": "int64" + } + ], + "description": "This is like `Restore` except that restored tensor can be listed as filling\nonly a slice of a larger tensor. `shape_and_slice` specifies the shape of the\nlarger tensor and the slice that the restored tensor covers.\n\nThe `shape_and_slice` input has the same format as the\nelements of the `shapes_and_slices` input of the `SaveSlices` op.", + "inputs": [ + { + "description": "Must have a single element. The pattern of the files from\nwhich we read the tensor.", + "name": "file_pattern", + "type": 7 + }, + { + "description": "Must have a single element. The name of the tensor to be\nrestored.", + "name": "tensor_name", + "type": 7 + }, + { + "description": "Scalar. The shapes and slice specifications to use when\nrestoring a tensors.", + "name": "shape_and_slice", + "type": 7 + } + ], + "outputs": [ + { + "description": "The restored tensor.", + "name": "tensor", + "typeAttr": "dt" + } + ], + "summary": "Restores a tensor from checkpoint files." + } + }, + { + "name": "RestoreV2", + "schema": { + "attributes": [ + { + "description": "shape {N}. The list of expected dtype for the tensors. Must match\nthose stored in the checkpoint.", + "minimum": 1, + "name": "dtypes", + "type": "type[]" + } + ], + "description": "For backward compatibility with the V1 format, this Op currently allows\nrestoring from a V1 checkpoint as well:\n - This Op first attempts to find the V2 index file pointed to by \"prefix\", and\n if found proceed to read it as a V2 checkpoint;\n - Otherwise the V1 read path is invoked.\nRelying on this behavior is not recommended, as the ability to fall back to read\nV1 might be deprecated and eventually removed.\n\nBy default, restores the named tensors in full. 
If the caller wishes to restore\nspecific slices of stored tensors, \"shape_and_slices\" should be non-empty\nstrings and correspondingly well-formed.\n\nCallers must ensure all the named tensors are indeed stored in the checkpoint.", + "inputs": [ + { + "description": "Must have a single element. The prefix of a V2 checkpoint.", + "name": "prefix", + "type": 7 + }, + { + "description": "shape {N}. The names of the tensors to be restored.", + "name": "tensor_names", + "type": 7 + }, + { + "description": "shape {N}. The slice specs of the tensors to be restored.\nEmpty strings indicate that they are non-partitioned tensors.", + "name": "shape_and_slices", + "type": 7 + } + ], + "outputs": [ + { + "description": "shape {N}. The restored tensors, whose shapes are read from the\ncheckpoint directly.", + "name": "tensors", + "typeListAttr": "dtypes" + } + ], + "summary": "Restores tensors from a V2 checkpoint." + } + }, + { + "name": "RetrieveTPUEmbeddingADAMParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the ADAM optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter momenta updated by the ADAM optimization algorithm.", + "name": "momenta", + "type": 1 + }, + { + "description": "Parameter velocities updated by the ADAM optimization algorithm.", + "name": "velocities", + "type": 1 + } + ], + "summary": "Retrieve ADAM embedding parameters." + } + }, + { + "name": "RetrieveTPUEmbeddingADAMParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the ADAM optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter momenta updated by the ADAM optimization algorithm.", + "name": "momenta", + "type": 1 + }, + { + "description": "Parameter velocities updated by the ADAM optimization algorithm.", + "name": "velocities", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the ADAM optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve ADAM embedding parameters with debug support." 
+ } + }, + { + "name": "RetrieveTPUEmbeddingAdadeltaParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the Adadelta optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the Adadelta optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Parameter updates updated by the Adadelta optimization algorithm.", + "name": "updates", + "type": 1 + } + ], + "summary": "Retrieve Adadelta embedding parameters." + } + }, + { + "name": "RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the Adadelta optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the Adadelta optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Parameter updates updated by the Adadelta optimization algorithm.", + "name": "updates", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the Adadelta optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve Adadelta embedding parameters with debug support." + } + }, + { + "name": "RetrieveTPUEmbeddingAdagradParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + } + ], + "summary": "Retrieve Adagrad embedding parameters." 
+ } + }, + { + "name": "RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the Adagrad optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve Adagrad embedding parameters with debug support." + } + }, + { + "name": "RetrieveTPUEmbeddingCenteredRMSPropParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the centered RMSProp optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter ms updated by the centered RMSProp optimization algorithm.", + "name": "ms", + "type": 1 + }, + { + "description": "Parameter mom updated by the centered RMSProp optimization algorithm.", + "name": "mom", + "type": 1 + }, + { + "description": "Parameter mg updated by the centered RMSProp optimization algorithm.", + "name": "mg", + "type": 1 + } + ], + "summary": "Retrieve centered RMSProp embedding parameters." + } + }, + { + "name": "RetrieveTPUEmbeddingFTRLParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the FTRL optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the FTRL optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Parameter linears updated by the FTRL optimization algorithm.", + "name": "linears", + "type": 1 + } + ], + "summary": "Retrieve FTRL embedding parameters." 
+ } + }, + { + "name": "RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the FTRL optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the FTRL optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Parameter linears updated by the FTRL optimization algorithm.", + "name": "linears", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the FTRL optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve FTRL embedding parameters with debug support." + } + }, + { + "name": "RetrieveTPUEmbeddingMDLAdagradLightParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the MDL Adagrad Light optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Parameter weights updated by the MDL Adagrad Light optimization algorithm.", + "name": "weights", + "type": 1 + }, + { + "description": "Parameter benefits updated by the MDL Adagrad Light optimization algorithm.", + "name": "benefits", + "type": 1 + } + ], + "summary": "Retrieve MDL Adagrad Light embedding parameters." + } + }, + { + "name": "RetrieveTPUEmbeddingMomentumParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the Momentum optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter momenta updated by the Momentum optimization algorithm.", + "name": "momenta", + "type": 1 + } + ], + "summary": "Retrieve Momentum embedding parameters." 
+ } + }, + { + "name": "RetrieveTPUEmbeddingMomentumParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the Momentum optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter momenta updated by the Momentum optimization algorithm.", + "name": "momenta", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the Momentum optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve Momentum embedding parameters with debug support." + } + }, + { + "name": "RetrieveTPUEmbeddingProximalAdagradParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the proximal Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the proximal Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + } + ], + "summary": "Retrieve proximal Adagrad embedding parameters." + } + }, + { + "name": "RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the proximal Adagrad optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter accumulators updated by the proximal Adagrad optimization algorithm.", + "name": "accumulators", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve proximal Adagrad embedding parameters with debug support." 
+ } + }, + { + "name": "RetrieveTPUEmbeddingProximalYogiParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "outputs": [ + { + "name": "parameters", + "type": 1 + }, + { + "name": "v", + "type": 1 + }, + { + "name": "m", + "type": 1 + } + ] + } + }, + { + "name": "RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "outputs": [ + { + "name": "parameters", + "type": 1 + }, + { + "name": "v", + "type": 1 + }, + { + "name": "m", + "type": 1 + }, + { + "name": "gradient_accumulators", + "type": 1 + } + ] + } + }, + { + "name": "RetrieveTPUEmbeddingRMSPropParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the RMSProp optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter ms updated by the RMSProp optimization algorithm.", + "name": "ms", + "type": 1 + }, + { + "description": "Parameter mom updated by the RMSProp optimization algorithm.", + "name": "mom", + "type": 1 + } + ], + "summary": "Retrieve RMSProp embedding parameters." + } + }, + { + "name": "RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the RMSProp optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter ms updated by the RMSProp optimization algorithm.", + "name": "ms", + "type": 1 + }, + { + "description": "Parameter mom updated by the RMSProp optimization algorithm.", + "name": "mom", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the RMSProp optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve RMSProp embedding parameters with debug support." 
+ } + }, + { + "name": "RetrieveTPUEmbeddingStochasticGradientDescentParameters", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the stochastic gradient descent optimization algorithm.", + "name": "parameters", + "type": 1 + } + ], + "summary": "Retrieve SGD embedding parameters." + } + }, + { + "name": "RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", + "schema": { + "attributes": [ + { + "default": -1, + "name": "table_id", + "type": "int64" + }, + { + "default": "", + "name": "table_name", + "type": "string" + }, + { + "name": "num_shards", + "type": "int64" + }, + { + "name": "shard_id", + "type": "int64" + }, + { + "default": "", + "name": "config", + "type": "string" + } + ], + "description": "An op that retrieves optimization parameters from embedding to host\nmemory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up\nthe correct embedding table configuration. 
For example, this op is\nused to retrieve updated parameters before saving a checkpoint.", + "outputs": [ + { + "description": "Parameter parameters updated by the stochastic gradient descent optimization algorithm.", + "name": "parameters", + "type": 1 + }, + { + "description": "Parameter gradient_accumulators updated by the Adadelta optimization algorithm.", + "name": "gradient_accumulators", + "type": 1 + } + ], + "summary": "Retrieve SGD embedding parameters with debug support." + } + }, + { + "name": "Reverse", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `string`.", + "name": "T", + "type": "type" + } + ], + "description": "Given a `tensor`, and a `bool` tensor `dims` representing the dimensions\nof `tensor`, this operation reverses each dimension i of `tensor` where\n`dims[i]` is `True`.\n\n`tensor` can have up to 8 dimensions. The number of dimensions\nof `tensor` must equal the number of elements in `dims`. 
In other words:\n\n`rank(tensor) = size(dims)`\n\nFor example:\n\n```\n# tensor 't' is [[[[ 0, 1, 2, 3],\n# [ 4, 5, 6, 7],\n# [ 8, 9, 10, 11]],\n# [[12, 13, 14, 15],\n# [16, 17, 18, 19],\n# [20, 21, 22, 23]]]]\n# tensor 't' shape is [1, 2, 3, 4]\n\n# 'dims' is [False, False, False, True]\nreverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n# 'dims' is [False, True, False, False]\nreverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n# 'dims' is [False, False, True, False]\nreverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]]\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n```", + "inputs": [ + { + "description": "Up to 8-D.", + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "1-D. The dimensions to reverse.", + "name": "dims", + "type": 10 + } + ], + "outputs": [ + { + "description": "The same shape as `tensor`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Reverses specific dimensions of a tensor." 
+ } + }, + { + "name": "ReverseSequence", + "schema": { + "attributes": [ + { + "description": "The dimension which is partially reversed.", + "name": "seq_dim", + "type": "int64" + }, + { + "default": 0, + "description": "The dimension along which reversal is performed.", + "name": "batch_dim", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tlen", + "type": "type" + } + ], + "description": "This op first slices `input` along the dimension `batch_dim`, and for each\nslice `i`, reverses the first `seq_lengths[i]` elements along\nthe dimension `seq_dim`.\n\nThe elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,\nand `seq_lengths` must be a vector of length `input.dims[batch_dim]`.\n\nThe output slice `i` along dimension `batch_dim` is then given by input\nslice `i`, with the first `seq_lengths[i]` slices along dimension\n`seq_dim` reversed.\n\nFor example:\n\n```\n# Given this:\nbatch_dim = 0\nseq_dim = 1\ninput.dims = (4, 8, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]\noutput[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]\noutput[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]\noutput[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[0, 7:, :, ...] = input[0, 7:, :, ...]\noutput[1, 2:, :, ...] = input[1, 2:, :, ...]\noutput[2, 3:, :, ...] = input[2, 3:, :, ...]\noutput[3, 2:, :, ...] = input[3, 2:, :, ...]\n```\n\nIn contrast, if:\n\n```\n# Given this:\nbatch_dim = 2\nseq_dim = 0\ninput.dims = (8, ?, 4, ...)\nseq_lengths = [7, 2, 3, 5]\n\n# then slices of input are reversed on seq_dim, but only up to seq_lengths:\noutput[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]\noutput[0:2, :, 1, :, ...] 
= input[2:0:-1, :, 1, :, ...]\noutput[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]\noutput[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]\n\n# while entries past seq_lens are copied through:\noutput[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]\noutput[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]\noutput[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]\noutput[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]\n```", + "inputs": [ + { + "description": "The input to reverse.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "1-D with length `input.dims(batch_dim)` and\n`max(seq_lengths) <= input.dims(seq_dim)`", + "name": "seq_lengths", + "typeAttr": "Tlen" + } + ], + "outputs": [ + { + "description": "The partially reversed input. It has the same shape as `input`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Reverses variable length slices." + } + }, + { + "name": "ReverseV2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "description": "Must be one of the following: `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `bool`, `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`, `string`.", + "name": "T", + "type": "type" + } + ], + "description": "NOTE `tf.reverse` has now changed behavior in preparation for 1.0.\n`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.\n\nGiven a `tensor`, and a `int32` tensor `axis` representing the set of\ndimensions of `tensor` to reverse. This operation reverses each dimension\n`i` for which there exists `j` s.t. `axis[j] == i`.\n\n`tensor` can have up to 8 dimensions. The number of dimensions specified\nin `axis` may be 0 or more entries. 
If an index is specified more than\nonce, a InvalidArgument error is raised.\n\nFor example:\n\n```\n# tensor 't' is [[[[ 0, 1, 2, 3],\n# [ 4, 5, 6, 7],\n# [ 8, 9, 10, 11]],\n# [[12, 13, 14, 15],\n# [16, 17, 18, 19],\n# [20, 21, 22, 23]]]]\n# tensor 't' shape is [1, 2, 3, 4]\n\n# 'dims' is [3] or 'dims' is [-1]\nreverse(t, dims) ==> [[[[ 3, 2, 1, 0],\n [ 7, 6, 5, 4],\n [ 11, 10, 9, 8]],\n [[15, 14, 13, 12],\n [19, 18, 17, 16],\n [23, 22, 21, 20]]]]\n\n# 'dims' is '[1]' (or 'dims' is '[-3]')\nreverse(t, dims) ==> [[[[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]\n [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]]]\n\n# 'dims' is '[2]' (or 'dims' is '[-2]')\nreverse(t, dims) ==> [[[[8, 9, 10, 11],\n [4, 5, 6, 7],\n [0, 1, 2, 3]]\n [[20, 21, 22, 23],\n [16, 17, 18, 19],\n [12, 13, 14, 15]]]]\n```", + "inputs": [ + { + "description": "Up to 8-D.", + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "1-D. The indices of the dimensions to reverse. Must be in the range\n`[-rank(tensor), rank(tensor))`.", + "name": "axis", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The same shape as `tensor`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Reverses specific dimensions of a tensor." 
+ } + }, + { + "name": "RightShift", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "Performs a logical shift for unsigned integer types, and an arithmetic shift\nfor signed integer types.\n\nIf `y` is negative, or greater than or equal to than the width of `x` in bits\nthe result is implementation defined.\n\nExample:\n\n```python\nimport tensorflow as tf\nfrom tensorflow.python.ops import bitwise_ops\nimport numpy as np\ndtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]\n\nfor dtype in dtype_list:\n lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)\n rhs = tf.constant([5, 0, 7, 11], dtype=dtype)\n\n right_shift_result = bitwise_ops.right_shift(lhs, rhs)\n\n print(right_shift_result)\n\n# This will print:\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)\n# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)\n\nlhs = np.array([-2, 64, 101, 32], dtype=np.int8)\nrhs = np.array([-1, -5, -3, -14], dtype=np.int8)\nbitwise_ops.right_shift(lhs, rhs)\n# \n```\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Elementwise computes the bitwise right-shift of `x` and `y`." 
+ } + }, + { + "name": "Rint", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "If the result is midway between two representable values,\nthe even representable is chosen.\nFor example:\n\n```\nrint(-1.5) ==> -2.0\nrint(0.5000001) ==> 1.0\nrint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]\n```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns element-wise integer closest to x." + } + }, + { + "name": "RngSkip", + "schema": { + "description": "The state of the RNG after\n`rng_skip(n)` will be the same as that after `stateful_uniform([n])`\n(or any other distribution). The actual increment added to the\ncounter is an unspecified implementation detail.", + "inputs": [ + { + "description": "The handle of the resource variable that stores the state of the RNG.", + "name": "resource", + "type": 20 + }, + { + "description": "The RNG algorithm.", + "name": "algorithm", + "type": 9 + }, + { + "description": "The amount of advancement.", + "name": "delta", + "type": 9 + } + ], + "summary": "Advance the counter of a counter-based RNG." + } + }, + { + "name": "Roll", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tshift", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Taxis", + "type": "type" + } + ], + "description": "The elements are shifted positively (towards larger indices) by the offset of\n`shift` along the dimension of `axis`. Negative `shift` values will shift\nelements in the opposite direction. Elements that roll passed the last position\nwill wrap around to the first and vice versa. 
Multiple shifts along multiple\naxes may be specified.\n\nFor example:\n\n```\n# 't' is [0, 1, 2, 3, 4]\nroll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]\n\n# shifting along multiple dimensions\n# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\nroll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]\n\n# shifting along the same axis multiple times\n# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\nroll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "description": "Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which\nelements are shifted positively (towards larger indices) along the dimension\nspecified by `axis[i]`. Negative shifts will roll the elements in the opposite\ndirection.", + "name": "shift", + "typeAttr": "Tshift" + }, + { + "description": "Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift\n`shift[i]` should occur. If the same axis is referenced more than once, the\ntotal shift for that axis will be the sum of all the shifts that belong to that\naxis.", + "name": "axis", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "description": "Has the same shape and size as the input. The elements are shifted\npositively (towards larger indices) by the offsets of `shift` along the\ndimensions of `axis`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Rolls the elements of a tensor along an axis." + } + }, + { + "name": "Round", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Rounds half to even. Also known as bankers rounding. 
If you want to round\naccording to the current system rounding mode use std::cint.", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Rounds the values of a tensor to the nearest integer, element-wise." + } + }, + { + "name": "Rpc", + "schema": { + "attributes": [ + { + "default": "", + "description": "RPC protocol to use. Empty string means use the default protocol.\nOptions include 'grpc'.", + "name": "protocol", + "type": "string" + }, + { + "default": true, + "description": "`boolean`. If `true` (default), then failures to connect\n(i.e., the server does not immediately respond) cause an RPC failure.", + "name": "fail_fast", + "type": "boolean" + }, + { + "default": 0, + "description": "`int`. If `0` (default), then the kernel will run the RPC\nrequest and only time out if the RPC deadline passes or the session times out.\nIf this value is greater than `0`, then the op will raise an exception if\nthe RPC takes longer than `timeout_in_ms`.", + "name": "timeout_in_ms", + "type": "int64" + } + ], + "description": "This op asynchronously performs either a single RPC request, or a batch\nof requests. 
RPC requests are defined by three main parameters:\n\n - `address` (the host+port or BNS address of the request)\n - `method` (the RPC method name for the request)\n - `request` (the serialized proto string, or vector of strings,\n of the RPC request argument).\n\nFor example, if you have an RPC service running on port localhost:2345,\nand its interface is configured with the following proto declaration:\n\n```\nservice MyService {\n rpc MyMethod(MyRequestProto) returns (MyResponseProto) {\n }\n};\n```\n\nthen call this op with arguments:\n\n```\naddress = \"localhost:2345\"\nmethod = \"MyService/MyMethod\"\n```\n\nThe `request` tensor is a string tensor representing serialized `MyRequestProto`\nstrings; and the output string tensor `response` will have the same shape\nand contain (upon successful completion) corresponding serialized\n`MyResponseProto` strings.\n\nFor example, to send a single, empty, `MyRequestProto`, call\nthis op with `request = \"\"`. To send 5 **parallel** empty requests,\ncall this op with `request = [\"\", \"\", \"\", \"\", \"\"]`.\n\nMore generally, one can create a batch of `MyRequestProto` serialized protos\nfrom regular batched tensors using the `encode_proto` op, and convert\nthe response `MyResponseProto` serialized protos to batched tensors\nusing the `decode_proto` op.\n\n**NOTE** Working with serialized proto strings is faster than instantiating\nactual proto objects in memory, so no performance degradation is expected\ncompared to writing custom kernels for this workflow.\n\nIf the connection fails or the remote worker returns an error\nstatus, the op reraises this exception locally.\n\nSee the `TryRpc` op if you prefer to handle RPC failures manually in the graph.", + "inputs": [ + { + "description": "`0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server.\nIf this tensor has more than 1 element, then multiple parallel rpc requests\nare sent. 
This argument broadcasts with `method` and `request`.", + "name": "address", + "type": 7 + }, + { + "description": "`0-D` or `1-D`. The method address on the RPC server.\nIf this tensor has more than 1 element, then multiple parallel rpc requests\nare sent. This argument broadcasts with `address` and `request`.", + "name": "method", + "type": 7 + }, + { + "description": "`0-D` or `1-D`. Serialized proto strings: the rpc request argument.\nIf this tensor has more than 1 element, then multiple parallel rpc requests\nare sent. This argument broadcasts with `address` and `method`.", + "name": "request", + "type": 7 + } + ], + "outputs": [ + { + "description": "Same shape as `request`. Serialized proto strings: the rpc responses.", + "name": "response", + "type": 7 + } + ], + "summary": "Perform batches of RPC requests." + } + }, + { + "name": "Rsqrt", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = 1 / \\sqrt{x}\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes reciprocal of square root of x element-wise." + } + }, + { + "name": "RsqrtGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`\nis the corresponding input gradient.", + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient for the rsqrt of `x` wrt its input." 
+ } + }, + { + "name": "SampleDistortedBoundingBox", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": 0, + "description": "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`. Otherwise, it is seeded by a random\nseed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "default": 0.10000000149011612, + "description": "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied.", + "name": "min_object_covered", + "type": "float32" + }, + { + "default": [ + 0.75, + 1.3300000429153442 + ], + "description": "The cropped area of the image must have an aspect ratio =\nwidth / height within this range.", + "name": "aspect_ratio_range", + "type": "float32[]" + }, + { + "default": [ + 0.05000000074505806, + 1.0 + ], + "description": "The cropped area of the image must contain a fraction of the\nsupplied image within this range.", + "name": "area_range", + "type": "float32[]" + }, + { + "default": 100, + "description": "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage.", + "name": "max_attempts", + "type": "int64" + }, + { + "default": false, + "description": "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. 
If false,\nraise an error.", + "name": "use_image_if_no_bounding_boxes", + "type": "boolean" + } + ], + "description": "Bounding box annotations are often supplied in addition to ground-truth labels\nin image recognition or object localization tasks. A common technique for\ntraining such a system is to randomly distort an image while preserving\nits content, i.e. *data augmentation*. This Op outputs a randomly distorted\nlocalization of an object, i.e. bounding box, given an `image_size`,\n`bounding_boxes` and a series of constraints.\n\nThe output of this Op is a single bounding box that may be used to crop the\noriginal image. The output is returned as 3 tensors: `begin`, `size` and\n`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\nimage. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize\nwhat the bounding box looks like.\n\nBounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example,\n\n```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.summary.image('images_with_box', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n```\n\nNote that if no bounding box information is available, setting\n`use_image_if_no_bounding_boxes = true` will assume there is a single implicit\nbounding box covering the whole image. 
If `use_image_if_no_bounding_boxes` is\nfalse and no bounding boxes are supplied, an error is raised.", + "inputs": [ + { + "description": "1-D, containing `[height, width, channels]`.", + "name": "image_size", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image.", + "name": "bounding_boxes", + "type": 1 + } + ], + "outputs": [ + { + "description": "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`.", + "name": "begin", + "typeAttr": "T" + }, + { + "description": "1-D, containing `[target_height, target_width, -1]`. Provide as input to\n`tf.slice`.", + "name": "size", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`.", + "name": "bboxes", + "type": 1 + } + ], + "summary": "Generate a single randomly distorted bounding box for an image." + } + }, + { + "name": "SampleDistortedBoundingBoxV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `uint8`, `int8`, `int16`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": 0, + "description": "If either `seed` or `seed2` are set to non-zero, the random number\ngenerator is seeded by the given `seed`. 
Otherwise, it is seeded by a random\nseed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "default": [ + 0.75, + 1.3300000429153442 + ], + "description": "The cropped area of the image must have an aspect ratio =\nwidth / height within this range.", + "name": "aspect_ratio_range", + "type": "float32[]" + }, + { + "default": [ + 0.05000000074505806, + 1.0 + ], + "description": "The cropped area of the image must contain a fraction of the\nsupplied image within this range.", + "name": "area_range", + "type": "float32[]" + }, + { + "default": 100, + "description": "Number of attempts at generating a cropped region of the image\nof the specified constraints. After `max_attempts` failures, return the entire\nimage.", + "name": "max_attempts", + "type": "int64" + }, + { + "default": false, + "description": "Controls behavior if no bounding boxes supplied.\nIf true, assume an implicit bounding box covering the whole input. If false,\nraise an error.", + "name": "use_image_if_no_bounding_boxes", + "type": "boolean" + } + ], + "description": "Bounding box annotations are often supplied in addition to ground-truth labels\nin image recognition or object localization tasks. A common technique for\ntraining such a system is to randomly distort an image while preserving\nits content, i.e. *data augmentation*. This Op outputs a randomly distorted\nlocalization of an object, i.e. bounding box, given an `image_size`,\n`bounding_boxes` and a series of constraints.\n\nThe output of this Op is a single bounding box that may be used to crop the\noriginal image. The output is returned as 3 tensors: `begin`, `size` and\n`bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the\nimage. 
The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize\nwhat the bounding box looks like.\n\nBounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The\nbounding box coordinates are floats in `[0.0, 1.0]` relative to the width and\nheight of the underlying image.\n\nFor example,\n\n```python\n # Generate a single distorted bounding box.\n begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=bounding_boxes)\n\n # Draw the bounding box in an image summary.\n image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),\n bbox_for_draw)\n tf.summary.image('images_with_box', image_with_box)\n\n # Employ the bounding box to distort the image.\n distorted_image = tf.slice(image, begin, size)\n```\n\nNote that if no bounding box information is available, setting\n`use_image_if_no_bounding_boxes = true` will assume there is a single implicit\nbounding box covering the whole image. If `use_image_if_no_bounding_boxes` is\nfalse and no bounding boxes are supplied, an error is raised.", + "inputs": [ + { + "description": "1-D, containing `[height, width, channels]`.", + "name": "image_size", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[batch, N, 4]` describing the N bounding boxes\nassociated with the image.", + "name": "bounding_boxes", + "type": 1 + }, + { + "description": "The cropped area of the image must contain at least this\nfraction of any bounding box supplied. The value of this parameter should be\nnon-negative. In the case of 0, the cropped area does not need to overlap\nany of the bounding boxes supplied.", + "name": "min_object_covered", + "type": 1 + } + ], + "outputs": [ + { + "description": "1-D, containing `[offset_height, offset_width, 0]`. Provide as input to\n`tf.slice`.", + "name": "begin", + "typeAttr": "T" + }, + { + "description": "1-D, containing `[target_height, target_width, -1]`. 
Provide as input to\n`tf.slice`.", + "name": "size", + "typeAttr": "T" + }, + { + "description": "3-D with shape `[1, 1, 4]` containing the distorted bounding box.\nProvide as input to `tf.image.draw_bounding_boxes`.", + "name": "bboxes", + "type": 1 + } + ], + "summary": "Generate a single randomly distorted bounding box for an image." + } + }, + { + "name": "SamplingDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "There is no transformation in the `tf.data` Python API for creating this dataset.\nInstead, it is created as a result of the `filter_with_random_uniform_fusion`\nstatic optimization. Whether this optimization is performed is determined by the\n`experimental_optimization.filter_with_random_uniform_fusion` option of\n`tf.data.Options`.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the sample rate. Each element of `input_dataset` is\nretained with this probability, independent of all other elements.", + "name": "rate", + "type": 1 + }, + { + "description": "A scalar representing seed of random number generator.", + "name": "seed", + "type": 9 + }, + { + "description": "A scalar representing seed2 of random number generator.", + "name": "seed2", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that takes a Bernoulli sample of the contents of another dataset." + } + }, + { + "name": "Save", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "T", + "type": "type[]" + } + ], + "description": "The size of `tensor_names` must match the number of tensors in `data`. `data[i]`\nis written to `filename` with name `tensor_names[i]`.\n\nSee also `SaveSlices`.", + "inputs": [ + { + "description": "Must have a single element. 
The name of the file to which we write\nthe tensor.", + "name": "filename", + "type": 7 + }, + { + "description": "Shape `[N]`. The names of the tensors to be saved.", + "name": "tensor_names", + "type": 7 + }, + { + "description": "`N` tensors to save.", + "name": "data", + "typeListAttr": "T" + } + ], + "summary": "Saves the input tensors to disk." + } + }, + { + "name": "SaveSlices", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "T", + "type": "type[]" + } + ], + "description": "This is like `Save` except that tensors can be listed in the saved file as being\na slice of a larger tensor. `shapes_and_slices` specifies the shape of the\nlarger tensor and the slice that this tensor covers. `shapes_and_slices` must\nhave as many elements as `tensor_names`.\n\nElements of the `shapes_and_slices` input must either be:\n\n* The empty string, in which case the corresponding tensor is\n saved normally.\n* A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the\n `dimI` are the dimensions of the larger tensor and `slice-spec`\n specifies what part is covered by the tensor to save.\n\n`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`\nwhere each `sliceI` is either:\n\n* The string `-` meaning that the slice covers all indices of this dimension\n* `start,length` where `start` and `length` are integers. In that\n case the slice covers `length` indices starting at `start`.\n\nSee also `Save`.", + "inputs": [ + { + "description": "Must have a single element. The name of the file to which we write the\ntensor.", + "name": "filename", + "type": 7 + }, + { + "description": "Shape `[N]`. The names of the tensors to be saved.", + "name": "tensor_names", + "type": 7 + }, + { + "description": "Shape `[N]`. 
The shapes and slice specifications to use when\nsaving the tensors.", + "name": "shapes_and_slices", + "type": 7 + }, + { + "description": "`N` tensors to save.", + "name": "data", + "typeListAttr": "T" + } + ], + "summary": "Saves input tensors slices to disk." + } + }, + { + "name": "SaveV2", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + } + ], + "description": "By default, saves the named tensors in full. If the caller wishes to save\nspecific slices of full tensors, \"shape_and_slices\" should be non-empty strings\nand correspondingly well-formed.", + "inputs": [ + { + "description": "Must have a single element. The prefix of the V2 checkpoint to which we\nwrite the tensors.", + "name": "prefix", + "type": 7 + }, + { + "description": "shape {N}. The names of the tensors to be saved.", + "name": "tensor_names", + "type": 7 + }, + { + "description": "shape {N}. The slice specs of the tensors to be saved.\nEmpty strings indicate that they are non-partitioned tensors.", + "name": "shape_and_slices", + "type": 7 + }, + { + "description": "`N` tensors to save.", + "name": "tensors", + "typeListAttr": "dtypes" + } + ], + "summary": "Saves tensors in V2 checkpoint format." + } + }, + { + "name": "ScalarSummary", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The input `tags` and `values` must have the same shape. The generated summary\nhas a summary value for each tag-value pair in `tags` and `values`.", + "inputs": [ + { + "description": "Tags for the summary.", + "name": "tags", + "type": 7 + }, + { + "description": "Same shape as `tags. Values for the summary.", + "name": "values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Scalar. 
Serialized `Summary` protocol buffer.", + "name": "summary", + "type": 7 + } + ], + "summary": "Outputs a `Summary` protocol buffer with scalar values." + } + }, + { + "name": "ScaleAndTranslate", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": "lanczos3", + "name": "kernel_type", + "type": "string" + }, + { + "default": true, + "name": "antialias", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "images", + "typeAttr": "T" + }, + { + "name": "size", + "type": 3 + }, + { + "name": "scale", + "type": 1 + }, + { + "name": "translation", + "type": 1 + } + ], + "outputs": [ + { + "name": "resized_images", + "type": 1 + } + ] + } + }, + { + "name": "ScaleAndTranslateGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`.", + "name": "T", + "type": "type" + }, + { + "default": "lanczos3", + "name": "kernel_type", + "type": "string" + }, + { + "default": true, + "name": "antialias", + "type": "boolean" + } + ], + "inputs": [ + { + "name": "grads", + "typeAttr": "T" + }, + { + "name": "original_image", + "typeAttr": "T" + }, + { + "name": "scale", + "type": 1 + }, + { + "name": "translation", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "ScanDataset", + "schema": { + "attributes": [ + { + "name": "f", + "type": "function" + }, + { + "minimum": 1, + "name": "Tstate", + "type": "type[]" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": false, + "name": "preserve_cardinality", + "type": "boolean" + }, + { + "default": true, + "name": "use_default_device", + "type": "boolean" + } + ], + 
"inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "initial_state", + "typeListAttr": "Tstate" + }, + { + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset successively reduces `f` over the elements of `input_dataset`." + } + }, + { + "name": "ScatterAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the addition will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] += updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] += updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to add to `ref`.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Adds sparse updates to a variable reference." + } + }, + { + "name": "ScatterDiv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] /= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] /= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] 
/= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions divide.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of values that `ref` is divided by.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Divides a variable reference by sparse updates." + } + }, + { + "name": "ScatterMax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the update will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = max(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] 
= max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions combine.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to reduce into `ref`.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Reduces sparse updates into a variable reference using the `max` operation." + } + }, + { + "name": "ScatterMin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the update will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation computes\n\n # Scalar indices\n ref[indices, ...] = min(ref[indices, ...], updates[...])\n\n # Vector indices (for each i)\n ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions combine.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to reduce into `ref`.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Reduces sparse updates into a variable reference using the `min` operation." + } + }, + { + "name": "ScatterMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the operation will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] *= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] *= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] 
*= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their contributions multiply.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to multiply to `ref`.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Multiplies sparse updates into a variable reference." + } + }, + { + "name": "ScatterNd", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "Creates a new tensor by applying sparse `updates` to individual values or\nslices within a tensor (initially zero for numeric, empty for string) of\nthe given `shape` according to indices. This operator is the inverse of the\n`tf.gather_nd` operator which extracts values or slices from a given tensor.\n\nThis operation is similar to tensor_scatter_add, except that the tensor is\nzero-initialized. 
Calling `tf.scatter_nd(indices, values, shape)` is identical\nto `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`\n\nIf `indices` contains duplicates, then their updates are accumulated (summed).\n\n**WARNING**: The order in which updates are applied is nondeterministic, so the\noutput will be nondeterministic if `indices` contains duplicates -- because\nof some numerical approximation issues, numbers summed in different order\nmay yield different results.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`shape`. The last dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\nThe last dimension of `indices` corresponds to indices into elements\n(if `indices.shape[-1] = shape.rank`) or slices\n(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n`shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\nThe simplest form of scatter is to insert individual elements in a tensor by\nindex. For example, say we want to insert 4 scattered elements in a rank-1\ntensor with 8 elements.\n\n
    \n\n
    \n\nIn Python, this scatter operation would look like this:\n\n```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n shape = tf.constant([8])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n```\n\nThe resulting tensor would look like this:\n\n [0, 11, 0, 10, 9, 0, 0, 12]\n\nWe can also, insert entire slices of a higher rank tensor all at once. For\nexample, if we wanted to insert two slices in the first dimension of a\nrank-3 tensor with two matrices of new values.\n\n
    \n\n
    \n\nIn Python, this scatter operation would look like this:\n\n```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n shape = tf.constant([4, 4, 4])\n scatter = tf.scatter_nd(indices, updates, shape)\n print(scatter)\n```\n\nThe resulting tensor would look like this:\n\n [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],\n [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],\n [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, the index is ignored.", + "inputs": [ + { + "description": "Index tensor.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Updates to scatter into output.", + "name": "updates", + "typeAttr": "T" + }, + { + "description": "1-D. The shape of the resulting tensor.", + "name": "shape", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "A new tensor with the given shape and updates applied according\nto the indices.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Scatter `updates` into a new tensor according to `indices`." + } + }, + { + "name": "ScatterNdAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "An optional bool. Defaults to True. 
If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that addition would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nadd = tf.scatter_nd_add(ref, indices, updates)\nwith tf.Session() as sess:\n print sess.run(add)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "inputs": [ + { + "description": "A mutable Tensor. Should be from a Variable node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to ref.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as ref. 
Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Applies sparse addition to individual values or slices in a Variable." + } + }, + { + "name": "ScatterNdNonAliasingAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "from `updates` according to indices `indices`. The updates are non-aliasing:\n`input` is only modified in-place if no other operations will use it.\nOtherwise, a copy of `input` is made. This operation has a gradient with\nrespect to both `input` and `updates`.\n\n`input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `input`.\nIt must be shape \\\\([d_0, ..., d_{Q-2}, K]\\\\) where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or `(P-K)`-dimensional slices\n(if `K < P`) along the `K`th dimension of `input`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n$$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to 8\nelements. 
In Python, that addition would look like this:\n\n input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n output = tf.scatter_nd_non_aliasing_add(input, indices, updates)\n with tf.Session() as sess:\n print(sess.run(output))\n\nThe resulting value `output` would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\nSee `tf.scatter_nd` for more details about how to make updates to slices.", + "inputs": [ + { + "description": "A Tensor.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "A Tensor. Must be one of the following types: `int32`, `int64`.\nA tensor of indices into `input`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto add to `input`.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A `Tensor` with the same shape as `input`, containing values of `input`\nupdated with `updates`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Applies sparse addition to `input` using individual values or slices" + } + }, + { + "name": "ScatterNdSub", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "An optional bool. Defaults to True. 
If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "within a given variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]\n```\n\nFor example, say we want to subtract 4 scattered elements from a rank-1 tensor\nwith 8 elements. In Python, that subtraction would look like this:\n\n```python\nref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\nindices = tf.constant([[4], [3], [1], [7]])\nupdates = tf.constant([9, 10, 11, 12])\nsub = tf.scatter_nd_sub(ref, indices, updates)\nwith tf.Session() as sess:\n print sess.run(sub)\n```\n\nThe resulting update to ref would look like this:\n\n [1, -9, 3, -6, -4, 6, 7, -4]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.", + "inputs": [ + { + "description": "A mutable Tensor. Should be from a Variable node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A Tensor. Must have the same type as ref. A tensor of updated values\nto subtract from ref.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as ref. 
Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Applies sparse subtraction to individual values or slices in a Variable." + } + }, + { + "name": "ScatterNdUpdate", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": true, + "description": "An optional bool. Defaults to True. If True, the assignment will\nbe protected by a lock; otherwise the behavior is undefined,\nbut may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "variable according to `indices`.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape \\\\([d_0, ..., d_{Q-2}, K]\\\\) where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n$$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$\n\nFor example, say we want to update 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that update would look like this:\n\n```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n update = tf.scatter_nd_update(ref, indices, updates)\n with tf.Session() as sess:\n print sess.run(update)\n```\n\nThe resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.\n\nSee also `tf.scatter_update` and `tf.batch_scatter_update`.", + "inputs": [ + { + "description": "A mutable Tensor. 
Should be from a Variable node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A Tensor. Must be one of the following types: int32, int64.\nA tensor of indices into ref.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A Tensor. Must have the same type as ref. A tensor of updated\nvalues to add to ref.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as ref. Returned as a convenience for operations that want to\nuse the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Applies sparse `updates` to individual values or slices within a given" + } + }, + { + "name": "ScatterSub", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "```python\n # Scalar indices\n ref[indices, ...] -= updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] -= updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] 
-= updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nDuplicate entries are handled correctly: if multiple `indices` reference\nthe same location, their (negated) contributions add.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    ", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to subtract from `ref`.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Subtracts sparse updates to a variable reference." + } + }, + { + "name": "ScatterUpdate", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": true, + "description": "If True, the assignment will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "This operation computes\n\n```python\n # Scalar indices\n ref[indices, ...] = updates[...]\n\n # Vector indices (for each i)\n ref[indices[i], ...] = updates[i, ...]\n\n # High rank indices (for each i, ..., j)\n ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]\n```\n\nThis operation outputs `ref` after the update is done.\nThis makes it easier to chain operations that need to use the reset value.\n\nIf values in `ref` is to be updated more than once, because there are\nduplicate entries in `indices`, the order at which the updates happen\nfor each value is undefined.\n\nRequires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.\n\n
    \n\n
    \n\nSee also `tf.batch_scatter_update` and `tf.scatter_nd_update`.", + "inputs": [ + { + "description": "Should be from a `Variable` node.", + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "description": "A tensor of indices into the first dimension of `ref`.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "A tensor of updated values to store in `ref`.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= Same as `ref`. Returned as a convenience for operations that want\nto use the updated values after the update is done.", + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Applies sparse updates to a variable reference." + } + }, + { + "name": "SdcaFprint", + "schema": { + "inputs": [ + { + "description": "vector of strings to compute fingerprints on.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "a (N,2) shaped matrix where N is the number of elements in the input\nvector. Each row contains the low and high parts of the fingerprint.", + "name": "output", + "type": 9 + } + ], + "summary": "Computes fingerprints of the input strings." + } + }, + { + "name": "SdcaOptimizer", + "schema": { + "attributes": [ + { + "description": "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses. 
Must be one of the following: `logistic_loss`, `squared_loss`, `hinge_loss`, `smooth_hinge_loss`, `poisson_loss`.", + "name": "loss_type", + "type": "string" + }, + { + "default": false, + "description": "Whether to use Adaptive SDCA for the inner loop.", + "name": "adaptative", + "type": "boolean" + }, + { + "description": "Number of sparse feature groups to train on.", + "minimum": 0, + "name": "num_sparse_features", + "type": "int64" + }, + { + "description": "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0.", + "minimum": 0, + "name": "num_sparse_features_with_values", + "type": "int64" + }, + { + "description": "Number of dense feature groups to train on.", + "minimum": 0, + "name": "num_dense_features", + "type": "int64" + }, + { + "description": "Symmetric l1 regularization strength.", + "name": "l1", + "type": "float32" + }, + { + "description": "Symmetric l2 regularization strength.", + "name": "l2", + "type": "float32" + }, + { + "description": "Number of partitions of the global loss function.", + "minimum": 1, + "name": "num_loss_partitions", + "type": "int64" + }, + { + "description": "Number of iterations per mini-batch.", + "minimum": 1, + "name": "num_inner_iterations", + "type": "int64" + } + ], + "description": "linear models with L1 + L2 regularization. As global optimization objective is\nstrongly-convex, the optimizer optimizes the dual objective at each step. The\noptimizer applies each update one example at a time. Examples are sampled\nuniformly, and the optimizer is learning rate free and enjoys linear convergence\nrate.\n\n[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
    \nShai Shalev-Shwartz, Tong Zhang. 2012\n\n$$Loss Objective = \\sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$\n\n[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
    \nChenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,\nPeter Richtarik, Martin Takac. 2015\n\n[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
    \nDominik Csiba, Zheng Qu, Peter Richtarik. 2015", + "inputs": [ + { + "description": "a list of vectors which contain example indices.", + "name": "sparse_example_indices", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "description": "a list of vectors which contain feature indices.", + "name": "sparse_feature_indices", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "description": "a list of vectors which contains feature value\nassociated with each feature group.", + "name": "sparse_feature_values", + "numberAttr": "num_sparse_features_with_values", + "type": 1 + }, + { + "description": "a list of matrices which contains the dense feature values.", + "name": "dense_features", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "description": "a vector which contains the weight associated with each\nexample.", + "name": "example_weights", + "type": 1 + }, + { + "description": "a vector which contains the label/target associated with each\nexample.", + "name": "example_labels", + "type": 1 + }, + { + "description": "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. 
This field maybe omitted for the\ndense approach.", + "name": "sparse_indices", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "description": "a list of vectors where each value is the weight associated with\na sparse feature group.", + "name": "sparse_weights", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "description": "a list of vectors where the values are the weights associated\nwith a dense feature group.", + "name": "dense_weights", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "description": "a list of vectors containing the example state data.", + "name": "example_state_data", + "type": 1 + } + ], + "outputs": [ + { + "description": "a list of vectors containing the updated example state\ndata.", + "name": "out_example_state_data", + "type": 1 + }, + { + "description": "a list of vectors where each value is the delta\nweights associated with a sparse feature group.", + "name": "out_delta_sparse_weights", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "description": "a list of vectors where the values are the delta\nweights associated with a dense feature group.", + "name": "out_delta_dense_weights", + "numberAttr": "num_dense_features", + "type": 1 + } + ], + "summary": "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for" + } + }, + { + "name": "SdcaOptimizerV2", + "schema": { + "attributes": [ + { + "description": "Type of the primal loss. Currently SdcaSolver supports logistic,\nsquared and hinge losses. 
Must be one of the following: `logistic_loss`, `squared_loss`, `hinge_loss`, `smooth_hinge_loss`, `poisson_loss`.", + "name": "loss_type", + "type": "string" + }, + { + "default": false, + "description": "Whether to use Adaptive SDCA for the inner loop.", + "name": "adaptive", + "type": "boolean" + }, + { + "description": "Number of sparse feature groups to train on.", + "minimum": 0, + "name": "num_sparse_features", + "type": "int64" + }, + { + "description": "Number of sparse feature groups with values\nassociated with it, otherwise implicitly treats values as 1.0.", + "minimum": 0, + "name": "num_sparse_features_with_values", + "type": "int64" + }, + { + "description": "Number of dense feature groups to train on.", + "minimum": 0, + "name": "num_dense_features", + "type": "int64" + }, + { + "description": "Symmetric l1 regularization strength.", + "name": "l1", + "type": "float32" + }, + { + "description": "Symmetric l2 regularization strength.", + "name": "l2", + "type": "float32" + }, + { + "description": "Number of partitions of the global loss function.", + "minimum": 1, + "name": "num_loss_partitions", + "type": "int64" + }, + { + "description": "Number of iterations per mini-batch.", + "minimum": 1, + "name": "num_inner_iterations", + "type": "int64" + } + ], + "description": "linear models with L1 + L2 regularization. As global optimization objective is\nstrongly-convex, the optimizer optimizes the dual objective at each step. The\noptimizer applies each update one example at a time. Examples are sampled\nuniformly, and the optimizer is learning rate free and enjoys linear convergence\nrate.\n\n[Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
    \nShai Shalev-Shwartz, Tong Zhang. 2012\n\n$$Loss Objective = \\sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$\n\n[Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
    \nChenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,\nPeter Richtarik, Martin Takac. 2015\n\n[Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
    \nDominik Csiba, Zheng Qu, Peter Richtarik. 2015", + "inputs": [ + { + "description": "a list of vectors which contain example indices.", + "name": "sparse_example_indices", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "description": "a list of vectors which contain feature indices.", + "name": "sparse_feature_indices", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "description": "a list of vectors which contains feature value\nassociated with each feature group.", + "name": "sparse_feature_values", + "numberAttr": "num_sparse_features_with_values", + "type": 1 + }, + { + "description": "a list of matrices which contains the dense feature values.", + "name": "dense_features", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "description": "a vector which contains the weight associated with each\nexample.", + "name": "example_weights", + "type": 1 + }, + { + "description": "a vector which contains the label/target associated with each\nexample.", + "name": "example_labels", + "type": 1 + }, + { + "description": "a list of vectors where each value is the indices which has\ncorresponding weights in sparse_weights. 
This field maybe omitted for the\ndense approach.", + "name": "sparse_indices", + "numberAttr": "num_sparse_features", + "type": 9 + }, + { + "description": "a list of vectors where each value is the weight associated with\na sparse feature group.", + "name": "sparse_weights", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "description": "a list of vectors where the values are the weights associated\nwith a dense feature group.", + "name": "dense_weights", + "numberAttr": "num_dense_features", + "type": 1 + }, + { + "description": "a list of vectors containing the example state data.", + "name": "example_state_data", + "type": 1 + } + ], + "outputs": [ + { + "description": "a list of vectors containing the updated example state\ndata.", + "name": "out_example_state_data", + "type": 1 + }, + { + "description": "a list of vectors where each value is the delta\nweights associated with a sparse feature group.", + "name": "out_delta_sparse_weights", + "numberAttr": "num_sparse_features", + "type": 1 + }, + { + "description": "a list of vectors where the values are the delta\nweights associated with a dense feature group.", + "name": "out_delta_dense_weights", + "numberAttr": "num_dense_features", + "type": 1 + } + ], + "summary": "Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for" + } + }, + { + "name": "SdcaShrinkL1", + "schema": { + "attributes": [ + { + "description": "Number of feature groups to apply shrinking step.", + "minimum": 0, + "name": "num_features", + "type": "int64" + }, + { + "description": "Symmetric l1 regularization strength.", + "name": "l1", + "type": "float32" + }, + { + "description": "Symmetric l2 regularization strength. 
Should be a positive float.", + "name": "l2", + "type": "float32" + } + ], + "inputs": [ + { + "description": "a list of vectors where each value is the weight associated with a\nfeature group.", + "isRef": true, + "name": "weights", + "numberAttr": "num_features", + "type": 1 + } + ], + "summary": "Applies L1 regularization shrink step on the parameters." + } + }, + { + "name": "SegmentMax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\max_j(data_j)\\\\) where `max` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the max is empty for a given segment ID `i`, `output[i] = 0`.\n\n
    \n\n
    \n\nFor example:\n\n```\nc = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\ntf.segment_max(c, tf.constant([0, 0, 1]))\n# ==> [[4, 3, 3, 4],\n# [5, 6, 7, 8]]\n```\n", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the maximum along segments of a tensor." + } + }, + { + "name": "SegmentMean", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\frac{\\sum_j data_j}{N}\\\\) where `mean` is\nover `j` such that `segment_ids[j] == i` and `N` is the total number of\nvalues summed.\n\nIf the mean is empty for a given segment ID `i`, `output[i] = 0`.\n\n
    \n\n
    \n\nFor example:\n\n```\nc = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\ntf.segment_mean(c, tf.constant([0, 0, 1]))\n# ==> [[2.5, 2.5, 2.5, 2.5],\n# [5, 6, 7, 8]]\n```\n", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the mean along segments of a tensor." + } + }, + { + "name": "SegmentMin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\min_j(data_j)\\\\) where `min` is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the min is empty for a given segment ID `i`, `output[i] = 0`.\n\n
    \n\n
    \n\nFor example:\n\n```\nc = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\ntf.segment_min(c, tf.constant([0, 0, 1]))\n# ==> [[1, 2, 2, 1],\n# [5, 6, 7, 8]]\n```", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the minimum along segments of a tensor." + } + }, + { + "name": "SegmentProd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\prod_j data_j\\\\) where the product is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the product is empty for a given segment ID `i`, `output[i] = 1`.\n\n
    \n\n
    \n\nFor example:\n\n```\nc = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\ntf.segment_prod(c, tf.constant([0, 0, 1]))\n# ==> [[4, 6, 6, 4],\n# [5, 6, 7, 8]]\n```\n", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the product along segments of a tensor." + } + }, + { + "name": "SegmentSum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output_i = \\sum_j data_j\\\\) where sum is over `j` such\nthat `segment_ids[j] == i`.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\n\n
    \n\n
    \n\nFor example:\n\n```\nc = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])\ntf.segment_sum(c, tf.constant([0, 0, 1]))\n# ==> [[5, 5, 5, 5],\n# [5, 6, 7, 8]]\n```\n", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor whose size is equal to the size of `data`'s\nfirst dimension. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum along segments of a tensor." + } + }, + { + "name": "Select", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "The `t`, and `e` tensors must all have the same shape, and the\noutput will also have that shape.\n\nThe `condition` tensor must be a scalar if `t` and `e` are scalars.\nIf `t` and `e` are vectors or higher rank, then `condition` must be either a\nscalar, a vector with size matching the first dimension of `t`, or must have\nthe same shape as `t`.\n\nThe `condition` tensor acts as a mask that chooses, based on the value at each\nelement, whether the corresponding element / row in the output should be\ntaken from `t` (if true) or `e` (if false).\n\nIf `condition` is a vector and `t` and `e` are higher rank matrices, then\nit chooses which row (outer dimension) to copy from `t` and `e`.\nIf `condition` has the same shape as `t` and `e`, then it chooses which\nelement to copy from `t` and `e`.\n\nFor example:\n\n```python\n# 'condition' tensor is [[True, False]\n# [False, True]]\n# 't' is [[1, 2],\n# [3, 4]]\n# 'e' is [[5, 6],\n# [7, 8]]\nselect(condition, t, e) # => [[1, 6], [7, 4]]\n\n\n# 'condition' tensor is [True, False]\n# 't' is [[1, 2],\n# [3, 4]]\n# 'e' is [[5, 6],\n# [7, 8]]\nselect(condition, t, e) ==> [[1, 2],\n [7, 8]]\n\n```", + "inputs": [ + { + "name": "condition", + 
"type": 10 + }, + { + "description": "= A `Tensor` which may have the same shape as `condition`.\nIf `condition` is rank 1, `t` may have higher rank,\nbut its first dimension must match the size of `condition`.", + "name": "t", + "typeAttr": "T" + }, + { + "description": "= A `Tensor` with the same type and shape as `t`.", + "name": "e", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "= A `Tensor` with the same type and shape as `t` and `e`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Selects elements from `t` or `e`, depending on `condition`." + } + }, + { + "name": "SelectV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "condition", + "type": 10 + }, + { + "name": "t", + "typeAttr": "T" + }, + { + "name": "e", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ] + } + }, + { + "name": "SelfAdjointEig", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`, `float16`.", + "name": "T", + "type": "type" + } + ], + "description": "The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions\nform square matrices, with the same constraints as the single matrix\nSelfAdjointEig.\n\nThe result is a [..., M+1, M] matrix with [..., 0,:] containing the\neigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues\nare sorted in non-decreasing order.", + "inputs": [ + { + "description": "Shape is `[..., M, M]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Shape is `[..., M+1, M]`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the Eigen Decomposition of a batch of square self-adjoint matrices." 
+ } + }, + { + "name": "SelfAdjointEigV2", + "schema": { + "attributes": [ + { + "default": true, + "description": "If `True` then eigenvectors will be computed and returned in `v`.\nOtherwise, only the eigenvalues will be computed.", + "name": "compute_v", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in\n`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues\nare sorted in non-decreasing order.\n\n```python\n# a is a tensor.\n# e is a tensor of eigenvalues.\n# v is a tensor of eigenvectors.\ne, v = self_adjoint_eig(a)\ne = self_adjoint_eig(a, compute_v=False)\n```", + "inputs": [ + { + "description": "`Tensor` input of shape `[N, N]`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Eigenvalues. Shape is `[N]`.", + "name": "e", + "typeAttr": "T" + }, + { + "description": "Eigenvectors. Shape is `[N, N]`.", + "name": "v", + "typeAttr": "T" + } + ], + "summary": "Computes the eigen decomposition of one or more square self-adjoint matrices." 
+ } + }, + { + "name": "Selu", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "if < 0, `scale * features` otherwise.\n\nTo be used together with\n`initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.\nFor correct dropout, use `tf.contrib.nn.alpha_dropout`.\n\nSee [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)", + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ], + "summary": "Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`" + } + }, + { + "name": "SeluGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The backpropagated gradients to the corresponding Selu operation.", + "name": "gradients", + "typeAttr": "T" + }, + { + "description": "The outputs of the corresponding Selu operation.", + "name": "outputs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradients: `gradients * (outputs + scale * alpha)`\nif outputs < 0, `scale * gradients` otherwise.", + "name": "backprops", + "typeAttr": "T" + } + ], + "summary": "Computes gradients for the scaled exponential linear (Selu) operation." 
+ } + }, + { + "name": "Send", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "The name of the tensor to send.", + "name": "tensor_name", + "type": "string" + }, + { + "description": "The name of the device sending the tensor.", + "name": "send_device", + "type": "string" + }, + { + "description": "The current incarnation of send_device.", + "name": "send_device_incarnation", + "type": "int64" + }, + { + "description": "The name of the device receiving the tensor.", + "name": "recv_device", + "type": "string" + }, + { + "default": false, + "description": "If set to true, this indicates that the node was added\nto the graph as a result of a client-side feed or fetch of Tensor data,\nin which case the corresponding send or recv is expected to be managed\nlocally by the caller.", + "name": "client_terminated", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "The tensor to send.", + "name": "tensor", + "typeAttr": "T" + } + ], + "summary": "Sends the named tensor from send_device to recv_device." + } + }, + { + "name": "SendTPUEmbeddingGradients", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "NN", + "type": "int64" + }, + { + "description": "Serialized TPUEmbeddingConfiguration proto.", + "name": "config", + "type": "string" + } + ], + "inputs": [ + { + "description": "A TensorList of gradients with which to update embedding tables.\nThis argument has the same length and shapes as the return value of\nRecvTPUEmbeddingActivations, but contains gradients of the model's loss\nwith respect to the embedding activations. 
The embedding tables are updated\nfrom these gradients via the optimizer specified in the TPU embedding\nconfiguration given to tpu.initialize_system.", + "name": "inputs", + "numberAttr": "N", + "type": 1 + }, + { + "description": "A TensorList of float32 scalars, one for each dynamic learning\nrate tag: see the comments in\n//third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.\nMultiple tables can share the same dynamic learning rate tag as specified\nin the configuration. If the learning rates for all tables are constant,\nthis list should be empty.", + "name": "learning_rates", + "numberAttr": "NN", + "type": 1 + } + ], + "summary": "Performs gradient updates of embedding tables." + } + }, + { + "name": "SerializeIterator", + "schema": { + "attributes": [ + { + "default": 0, + "name": "external_state_policy", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A handle to an iterator resource.", + "name": "resource_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "A variant tensor storing the state of the iterator contained in the\nresource.", + "name": "serialized", + "type": 21 + } + ], + "summary": "Converts the given `resource_handle` representing an iterator to a variant tensor." + } + }, + { + "name": "SerializeManySparse", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 7 + }, + "description": "The `dtype` to use for serialization; the supported types are `string`\n(default) and `variant`. Must be one of the following: `string`, `variant`.", + "name": "out_type", + "type": "type" + } + ], + "description": "The `SparseTensor` must have rank `R` greater than 1, and the first dimension\nis treated as the minibatch dimension. Elements of the `SparseTensor`\nmust be sorted in increasing order of this first dimension. 
The serialized\n`SparseTensor` objects going into each row of `serialized_sparse` will have\nrank `R-1`.\n\nThe minibatch size `N` is extracted from `sparse_shape[0]`.", + "inputs": [ + { + "description": "2-D. The `indices` of the minibatch `SparseTensor`.", + "name": "sparse_indices", + "type": 9 + }, + { + "description": "1-D. The `values` of the minibatch `SparseTensor`.", + "name": "sparse_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the minibatch `SparseTensor`.", + "name": "sparse_shape", + "type": 9 + } + ], + "outputs": [ + { + "name": "serialized_sparse", + "typeAttr": "out_type" + } + ], + "summary": "Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object." + } + }, + { + "name": "SerializeSparse", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 7 + }, + "description": "The `dtype` to use for serialization; the supported types are `string`\n(default) and `variant`. Must be one of the following: `string`, `variant`.", + "name": "out_type", + "type": "type" + } + ], + "inputs": [ + { + "description": "2-D. The `indices` of the `SparseTensor`.", + "name": "sparse_indices", + "type": 9 + }, + { + "description": "1-D. The `values` of the `SparseTensor`.", + "name": "sparse_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the `SparseTensor`.", + "name": "sparse_shape", + "type": 9 + } + ], + "outputs": [ + { + "name": "serialized_sparse", + "typeAttr": "out_type" + } + ], + "summary": "Serialize a `SparseTensor` into a `[3]` `Tensor` object." 
+ } + }, + { + "name": "SerializeTensor", + "schema": { + "attributes": [ + { + "description": "The type of the input tensor.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "A Tensor of type `T`.", + "name": "tensor", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A serialized TensorProto proto of the input tensor.", + "name": "serialized", + "type": 7 + } + ], + "summary": "Transforms a Tensor into a serialized TensorProto proto." + } + }, + { + "name": "SetSize", + "schema": { + "attributes": [ + { + "default": true, + "name": "validate_indices", + "type": "boolean" + }, + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.", + "name": "T", + "type": "type" + } + ], + "description": "Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,\nand `set_shape`. The last dimension contains values in a set, duplicates are\nallowed but ignored.\n\nIf `validate_indices` is `True`, this op validates the order and range of `set`\nindices.", + "inputs": [ + { + "description": "2D `Tensor`, indices of a `SparseTensor`.", + "name": "set_indices", + "type": 9 + }, + { + "description": "1D `Tensor`, values of a `SparseTensor`.", + "name": "set_values", + "typeAttr": "T" + }, + { + "description": "1D `Tensor`, shape of a `SparseTensor`.", + "name": "set_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st\n`n-1` dimensions as `set`. Each value is the number of unique elements in\nthe corresponding `[0...n-1]` dimension of `set`.", + "name": "size", + "type": 3 + } + ], + "summary": "Number of unique elements along last dimension of input `set`." 
+ } + }, + { + "name": "SetStatsAggregatorDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "stats_aggregator", + "type": 20 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "counter_prefix", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "Shape", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_type", + "type": "type" + } + ], + "description": "This operation returns a 1-D integer tensor representing the shape of `input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\nshape(t) ==> [2, 2, 3]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Returns the shape of a tensor." + } + }, + { + "name": "ShapeN", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_type", + "type": "type" + } + ], + "description": "This operation returns N 1-D integer tensors representing shape of `input[i]s`.", + "inputs": [ + { + "name": "input", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "numberAttr": "N", + "typeAttr": "out_type" + } + ], + "summary": "Returns shape of tensors." 
+ } + }, + { + "name": "ShardDataset", + "schema": { + "attributes": [ + { + "default": false, + "name": "require_non_empty", + "type": "boolean" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "An integer representing the number of shards operating in parallel.", + "name": "num_shards", + "type": 9 + }, + { + "description": "An integer representing the current worker index.", + "name": "index", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a `Dataset` that includes only 1/`num_shards` of this dataset." + } + }, + { + "name": "ShardedFilename", + "schema": { + "description": " %s-%05d-of-%05d, basename, shard, num_shards.", + "inputs": [ + { + "name": "basename", + "type": 7 + }, + { + "name": "shard", + "type": 3 + }, + { + "name": "num_shards", + "type": 3 + } + ], + "outputs": [ + { + "name": "filename", + "type": 7 + } + ], + "summary": "Generate a sharded filename. The filename is printf formatted as" + } + }, + { + "name": "ShardedFilespec", + "schema": { + "inputs": [ + { + "name": "basename", + "type": 7 + }, + { + "name": "num_shards", + "type": 3 + } + ], + "outputs": [ + { + "name": "filename", + "type": 7 + } + ], + "summary": "Generate a glob pattern matching all sharded file names." + } + }, + { + "name": "ShuffleAndRepeatDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": true, + "name": "reshuffle_each_iteration", + "type": "boolean" + } + ], + "description": "pseudorandomly.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "The number of output elements to buffer in an iterator over\nthis dataset. 
Compare with the `min_after_dequeue` attr when creating a\n`RandomShuffleQueue`.", + "name": "buffer_size", + "type": 9 + }, + { + "description": "A scalar seed for the random number generator. If either `seed` or\n`seed2` is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "name": "seed", + "type": 9 + }, + { + "description": "A second scalar seed to avoid seed collision.", + "name": "seed2", + "type": 9 + }, + { + "description": "A scalar representing the number of times the underlying dataset\nshould be repeated. The default is `-1`, which results in infinite repetition.", + "name": "count", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that shuffles and repeats elements from `input_dataset`" + } + }, + { + "name": "ShuffleAndRepeatDatasetV2", + "schema": { + "attributes": [ + { + "default": true, + "name": "reshuffle_each_iteration", + "type": "boolean" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + }, + { + "name": "count", + "type": 9 + }, + { + "name": "seed_generator", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ShuffleDataset", + "schema": { + "attributes": [ + { + "default": true, + "description": "If true, each iterator over this dataset will be given\na different pseudorandomly generated seed, based on a sequence seeded by the\n`seed` and `seed2` inputs. 
If false, each iterator will be given the same\nseed, and repeated iteration over this dataset will yield the exact same\nsequence of results.", + "name": "reshuffle_each_iteration", + "type": "boolean" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "The number of output elements to buffer in an iterator over\nthis dataset. Compare with the `min_after_dequeue` attr when creating a\n`RandomShuffleQueue`.", + "name": "buffer_size", + "type": 9 + }, + { + "description": "A scalar seed for the random number generator. If either `seed` or\n`seed2` is set to be non-zero, the random number generator is seeded\nby the given seed. Otherwise, a random seed is used.", + "name": "seed", + "type": 9 + }, + { + "description": "A second scalar seed to avoid seed collision.", + "name": "seed2", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that shuffles elements from `input_dataset` pseudorandomly." 
+ } + }, + { + "name": "ShuffleDatasetV2", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "seed_generator", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ShuffleDatasetV3", + "schema": { + "attributes": [ + { + "default": true, + "name": "reshuffle_each_iteration", + "type": "boolean" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "buffer_size", + "type": 9 + }, + { + "name": "seed", + "type": 9 + }, + { + "name": "seed2", + "type": 9 + }, + { + "name": "seed_generator", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "ShutdownDistributedTPU", + "schema": { + "description": "The op returns an error if no system is running.", + "summary": "Shuts down a running distributed TPU system." + } + }, + { + "name": "Sigmoid", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "category": "Activation", + "description": "Specifically, `y = 1 / (1 + exp(-x))`.", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes sigmoid of `x` element-wise." 
+ } + }, + { + "name": "SigmoidGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and\n`dy` is the corresponding input gradient.", + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient of the sigmoid of `x` wrt its input." + } + }, + { + "name": "Sign", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.\n\nFor complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.\n\nExample usage:\n>>> tf.math.sign([0., 2., -3.])\n", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns an element-wise indication of the sign of a number." + } + }, + { + "name": "Sin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes sine of every\n element in the tensor. 
Input range is `(-inf, inf)` and\n output range is `[-1,1]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10, float(\"inf\")])\n tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes sine of x element-wise." + } + }, + { + "name": "Sinh", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes hyperbolic sine of every\n element in the tensor. Input range is `[-inf,inf]` and output range\n is `[-inf,inf]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 2, 10, float(\"inf\")])\n tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes hyperbolic sine of x element-wise." + } + }, + { + "name": "Size", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_type", + "type": "type" + } + ], + "description": "This operation returns an integer representing the number of elements in\n`input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]]\nsize(t) ==> 12\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Returns the size of a tensor." 
+ } + }, + { + "name": "SkipDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements from the `input_dataset`\nthat should be skipped. If count is -1, skips everything.", + "name": "count", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that skips `count` elements from the `input_dataset`." + } + }, + { + "name": "Skipgram", + "schema": { + "attributes": [ + { + "description": "The corpus's text file name.", + "name": "filename", + "type": "string" + }, + { + "description": "The size of produced batch.", + "name": "batch_size", + "type": "int64" + }, + { + "default": 5, + "description": "The number of words to predict to the left and right of the target.", + "name": "window_size", + "type": "int64" + }, + { + "default": 5, + "description": "The minimum number of word occurrences for it to be included in the\nvocabulary.", + "name": "min_count", + "type": "int64" + }, + { + "default": 0.0010000000474974513, + "description": "Threshold for word occurrence. Words that appear with higher\nfrequency will be randomly down-sampled. Set to 0 to disable.", + "name": "subsample", + "type": "float32" + } + ], + "outputs": [ + { + "description": "A vector of words in the corpus.", + "name": "vocab_word", + "type": 7 + }, + { + "description": "Frequencies of words. 
Sorted in the non-ascending order.", + "name": "vocab_freq", + "type": 3 + }, + { + "description": "Number of words per epoch in the data file.", + "name": "words_per_epoch", + "type": 9 + }, + { + "description": "The current epoch number.", + "name": "current_epoch", + "type": 3 + }, + { + "description": "The total number of words processed so far.", + "name": "total_words_processed", + "type": 9 + }, + { + "description": "A vector of word ids.", + "name": "examples", + "type": 3 + }, + { + "description": "A vector of word ids.", + "name": "labels", + "type": 3 + } + ], + "summary": "Parses a text file and creates a batch of examples." + } + }, + { + "name": "SleepDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "name": "sleep_microseconds", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ] + } + }, + { + "name": "Slice", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Index", + "type": "type" + } + ], + "category": "Tensor", + "description": "The output tensor is a tensor with dimensions described by 'size'\nwhose values are extracted from 'input' starting at the offsets in\n'begin'.\n\n*Requirements*:\n 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "description": "begin[i] specifies the offset into the 'i'th dimension of\n'input' to slice from.", + "name": "begin", + "typeAttr": "Index" + }, + { + "description": "size[i] specifies the number of elements of the 'i'th dimension\nof 'input' to slice. If size[i] is -1, all remaining elements in dimension\ni are included in the slice (i.e. 
this is equivalent to setting\nsize[i] = input.dim_size(i) - begin[i]).", + "name": "size", + "typeAttr": "Index" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Return a slice from 'input'." + } + }, + { + "name": "SlidingWindowDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements in the\nsliding window.", + "name": "window_size", + "type": 9 + }, + { + "description": "A scalar representing the steps moving the sliding window\nforward in one iteration. It must be positive.", + "name": "window_shift", + "type": 9 + }, + { + "description": "A scalar representing the stride of the input elements of the sliding window.\nIt must be positive.", + "name": "window_stride", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that passes a sliding window over `input_dataset`." + } + }, + { + "name": "Snapshot", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns a copy of the input tensor." 
+ } + }, + { + "name": "SnapshotDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": "", + "name": "compression", + "type": "string" + }, + { + "default": "", + "name": "reader_path_prefix", + "type": "string" + }, + { + "default": "", + "name": "writer_path_prefix", + "type": "string" + }, + { + "default": 10737418240, + "name": "shard_size_bytes", + "type": "int64" + }, + { + "default": 86400, + "name": "pending_snapshot_expiry_seconds", + "type": "int64" + }, + { + "default": 1, + "name": "num_reader_threads", + "type": "int64" + }, + { + "default": 1, + "name": "reader_buffer_size", + "type": "int64" + }, + { + "default": 1, + "name": "num_writer_threads", + "type": "int64" + }, + { + "default": 1, + "name": "writer_buffer_size", + "type": "int64" + }, + { + "default": false, + "name": "shuffle_on_read", + "type": "boolean" + }, + { + "default": 0, + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "name": "seed2", + "type": "int64" + }, + { + "default": "auto", + "name": "mode", + "type": "string" + }, + { + "default": "", + "name": "snapshot_name", + "type": "string" + } + ], + "description": "This dataset attempts to determine whether a valid snapshot exists at the\n`snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`.\nIf not, it will run the preprocessing pipeline as usual, and write out a\nsnapshot of the data processed for future use.", + "inputs": [ + { + "description": "A variant tensor representing the input dataset.", + "name": "input_dataset", + "type": 21 + }, + { + "description": "The path we should write snapshots to / read snapshots from.", + "name": "path", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that will write to / read from a snapshot." 
+ } + }, + { + "name": "SobolSample", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the sample. One of: `float32` or `float64`. Must be one of the following: `float32`, `float64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "Creates a Sobol sequence with `num_results` samples. Each sample has dimension\n`dim`. Skips the first `skip` samples.", + "inputs": [ + { + "description": "Positive scalar `Tensor` representing each sample's dimension.", + "name": "dim", + "type": 3 + }, + { + "description": "Positive scalar `Tensor` of dtype int32. The number of Sobol points to return\nin the output.", + "name": "num_results", + "type": 3 + }, + { + "description": "Positive scalar `Tensor` of dtype int32. The number of initial points of the\nSobol sequence to skip.", + "name": "skip", + "type": 3 + } + ], + "outputs": [ + { + "description": "`Tensor` of samples from Sobol sequence with `shape` [num_results, dim].", + "name": "samples", + "typeAttr": "dtype" + } + ], + "summary": "Generates points from the Sobol sequence." + } + }, + { + "name": "Softmax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "category": "Activation", + "description": "For each batch `i` and class `j` we have\n\n $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$", + "inputs": [ + { + "description": "2-D with shape `[batch_size, num_classes]`.", + "name": "logits", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same shape as `logits`.", + "name": "softmax", + "typeAttr": "T" + } + ], + "summary": "Computes softmax activations." 
+ } + }, + { + "name": "SoftmaxCrossEntropyWithLogits", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "Inputs are the logits, not probabilities.", + "inputs": [ + { + "description": "batch_size x num_classes matrix", + "name": "features", + "typeAttr": "T" + }, + { + "description": "batch_size x num_classes matrix\nThe caller must ensure that each batch of labels represents a valid\nprobability distribution.", + "name": "labels", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Per example loss (batch_size vector).", + "name": "loss", + "typeAttr": "T" + }, + { + "description": "backpropagated gradients (batch_size x num_classes matrix).", + "name": "backprop", + "typeAttr": "T" + } + ], + "summary": "Computes softmax cross entropy cost and gradients to backpropagate." + } + }, + { + "name": "Softplus", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ], + "summary": "Computes softplus: `log(exp(features) + 1)`." 
+ } + }, + { + "name": "SoftplusGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The backpropagated gradients to the corresponding softplus operation.", + "name": "gradients", + "typeAttr": "T" + }, + { + "description": "The features passed as input to the corresponding softplus operation.", + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradients: `gradients / (1 + exp(-features))`.", + "name": "backprops", + "typeAttr": "T" + } + ], + "summary": "Computes softplus gradients for a softplus operation." + } + }, + { + "name": "Softsign", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "activations", + "typeAttr": "T" + } + ], + "summary": "Computes softsign: `features / (abs(features) + 1)`." + } + }, + { + "name": "SoftsignGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The backpropagated gradients to the corresponding softsign operation.", + "name": "gradients", + "typeAttr": "T" + }, + { + "description": "The features passed as input to the corresponding softsign operation.", + "name": "features", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The gradients: `gradients / (1 + abs(features)) ** 2`.", + "name": "backprops", + "typeAttr": "T" + } + ], + "summary": "Computes softsign gradients for a softsign operation." 
+ } + }, + { + "name": "SpaceToBatch", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tpaddings", + "type": "type" + }, + { + "minimum": 2, + "name": "block_size", + "type": "int64" + } + ], + "description": "This is a legacy version of the more general SpaceToBatchND.\n\nZero-pads and then rearranges (permutes) blocks of spatial data into batch.\nMore specifically, this op outputs a copy of the input tensor where values from\nthe `height` and `width` dimensions are moved to the `batch` dimension. After\nthe zero-padding, both `height` and `width` of the input must be divisible by the\nblock size.", + "inputs": [ + { + "description": "4-D with shape `[batch, height, width, depth]`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "2-D tensor of non-negative integers with shape `[2, 2]`. It specifies\n the padding of the input with zeros across the spatial dimensions as follows:\n\n paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]\n\n The effective spatial dimensions of the zero-padded input tensor will be:\n\n height_pad = pad_top + height + pad_bottom\n width_pad = pad_left + width + pad_right\n\nThe attr `block_size` must be greater than one. 
It indicates the block size.\n\n * Non-overlapping blocks of size `block_size x block size` in the height and\n width dimensions are rearranged into the batch dimension at each location.\n * The batch of the output tensor is `batch * block_size * block_size`.\n * Both height_pad and width_pad must be divisible by block_size.\n\nThe shape of the output will be:\n\n [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,\n depth]\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[8, 1, 2, 1]` and value:\n\n```\nx = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],\n [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution.", + "name": "paddings", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + 
"summary": "SpaceToBatch for 4-D tensors of type T." + } + }, + { + "name": "SpaceToBatchND", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tblock_shape", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tpaddings", + "type": "type" + } + ], + "description": "This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into a\ngrid of blocks of shape `block_shape`, and interleaves these blocks with the\n\"batch\" dimension (0) such that in the output, the spatial dimensions\n`[1, ..., M]` correspond to the position within the grid, and the batch\ndimension combines both the position within a spatial block and the original\nbatch position. Prior to division into blocks, the spatial dimensions of the\ninput are optionally zero padded according to `paddings`. See below for a\nprecise description.", + "inputs": [ + { + "description": "N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,\nwhere spatial_shape has `M` dimensions.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "1-D with shape `[M]`, all values must be >= 1.", + "name": "block_shape", + "typeAttr": "Tblock_shape" + }, + { + "description": "2-D with shape `[M, 2]`, all values must be >= 0.\n `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension\n `i + 1`, which corresponds to spatial dimension `i`. It is required that\n `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.\n\nThis operation is equivalent to the following steps:\n\n1. Zero-pad the start and end of dimensions `[1, ..., M]` of the\n input according to `paddings` to produce `padded` of shape `padded_shape`.\n\n2. 
Reshape `padded` to `reshaped_padded` of shape:\n\n [batch] +\n [padded_shape[1] / block_shape[0],\n block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1],\n block_shape[M-1]] +\n remaining_shape\n\n3. Permute dimensions of `reshaped_padded` to produce\n `permuted_reshaped_padded` of shape:\n\n block_shape +\n [batch] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\n4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch\n dimension, producing an output tensor of shape:\n\n [batch * prod(block_shape)] +\n [padded_shape[1] / block_shape[0],\n ...,\n padded_shape[M] / block_shape[M-1]] +\n remaining_shape\n\nSome examples:\n\n(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [2]], [[3], [4]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 1]` and value:\n\n```\n[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]\n```\n\n(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThe output tensor has shape `[4, 1, 1, 3]` and value:\n\n```\n[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]\n```\n\n(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and\n `paddings = [[0, 0], [0, 0]]`:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]],\n [[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor has shape `[4, 2, 2, 1]` and value:\n\n```\nx = [[[[1], [3]], [[9], [11]]],\n [[[2], [4]], [[10], [12]]],\n [[[5], [7]], [[13], [15]]],\n [[[6], [8]], [[14], [16]]]]\n```\n\n(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and\n paddings = `[[0, 0], [2, 0]]`:\n\n```\nx = [[[[1], [2], [3], [4]],\n [[5], [6], [7], [8]]],\n [[[9], [10], [11], [12]],\n [[13], [14], [15], [16]]]]\n```\n\nThe output tensor 
has shape `[8, 1, 3, 1]` and value:\n\n```\nx = [[[[0], [1], [3]]], [[[0], [9], [11]]],\n [[[0], [2], [4]]], [[[0], [10], [12]]],\n [[[0], [5], [7]]], [[[0], [13], [15]]],\n [[[0], [6], [8]]], [[[0], [14], [16]]]]\n```\n\nAmong others, this operation is useful for reducing atrous convolution into\nregular convolution.", + "name": "paddings", + "typeAttr": "Tpaddings" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "SpaceToBatch for N-D tensors of type T." + } + }, + { + "name": "SpaceToDepth", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "The size of the spatial block.", + "minimum": 2, + "name": "block_size", + "type": "int64" + }, + { + "default": "NHWC", + "description": "Must be one of the following: `NHWC`, `NCHW`, `NCHW_VECT_C`.", + "name": "data_format", + "type": "string" + } + ], + "description": "Rearranges blocks of spatial data, into depth. More specifically,\nthis op outputs a copy of the input tensor where values from the `height`\nand `width` dimensions are moved to the `depth` dimension.\nThe attr `block_size` indicates the input block size.\n\n * Non-overlapping blocks of size `block_size x block size` are rearranged\n into depth at each location.\n * The depth of the output tensor is `block_size * block_size * input_depth`.\n * The Y, X coordinates within each block of the input become the high order\n component of the output channel index.\n * The input tensor's height and width must be divisible by block_size.\n\nThe `data_format` attr specifies the layout of the input and output tensors\nwith the following options:\n \"NHWC\": `[ batch, height, width, channels ]`\n \"NCHW\": `[ batch, channels, height, width ]`\n \"NCHW_VECT_C\":\n `qint8 [ batch, channels / 4, height, width, 4 ]`\n\nIt is useful to consider the operation as transforming a 6-D Tensor.\ne.g. 
for data_format = NHWC,\n Each element in the input tensor can be specified via 6 coordinates,\n ordered by decreasing memory layout significance as:\n n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates\n within the output image, bX, bY means coordinates\n within the input block, iC means input channels).\n The output would be a transpose to the following layout:\n n,oY,oX,bY,bX,iC\n\nThis operation is useful for resizing the activations between convolutions\n(but keeping all data), e.g. instead of pooling. It is also useful for training\npurely convolutional models.\n\nFor example, given an input of shape `[1, 2, 2, 1]`, data_format = \"NHWC\" and\nblock_size = 2:\n\n```\nx = [[[[1], [2]],\n [[3], [4]]]]\n```\n\nThis operation will output a tensor of shape `[1, 1, 1, 4]`:\n\n```\n[[[[1, 2, 3, 4]]]]\n```\n\nHere, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,\nthe corresponding output will have a single element (i.e. width and height are\nboth 1) and will have a depth of 4 channels (1 * block_size * block_size).\nThe output element shape is `[1, 1, 4]`.\n\nFor an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.\n\n```\nx = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n```\n\nThis operation, for block_size of 2, will return the following tensor of shape\n`[1, 1, 1, 12]`\n\n```\n[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\n```\n\nSimilarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:\n\n```\nx = [[[[1], [2], [5], [6]],\n [[3], [4], [7], [8]],\n [[9], [10], [13], [14]],\n [[11], [12], [15], [16]]]]\n```\n\nthe operator will return the following tensor of shape `[1 2 2 4]`:\n\n```\nx = [[[[1, 2, 3, 4],\n [5, 6, 7, 8]],\n [[9, 10, 11, 12],\n [13, 14, 15, 16]]]]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "SpaceToDepth for tensors of type T." 
+ } + }, + { + "name": "SparseAccumulatorApplyGradient", + "schema": { + "attributes": [ + { + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Boolean indicating whether gradient_shape is unknown, in which\ncase the input is ignored during validation.", + "name": "has_known_shape", + "type": "boolean" + } + ], + "description": "Does not add if local_step is smaller than the accumulator's\nglobal_step.", + "inputs": [ + { + "description": "The handle to a accumulator.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "The local_step value at which the sparse gradient was computed.", + "name": "local_step", + "type": 9 + }, + { + "description": "Indices of the sparse gradient to be accumulated. Must be a\nvector.", + "name": "gradient_indices", + "type": 9 + }, + { + "description": "Values are the non-zero slices of the gradient, and must have\nthe same first dimension as indices, i.e., the nnz represented by indices and\nvalues must be consistent.", + "name": "gradient_values", + "typeAttr": "dtype" + }, + { + "description": "Shape of the sparse gradient to be accumulated.", + "name": "gradient_shape", + "type": 9 + } + ], + "summary": "Applies a sparse gradient to a given accumulator." + } + }, + { + "name": "SparseAccumulatorTakeGradient", + "schema": { + "attributes": [ + { + "description": "The data type of accumulated gradients. Needs to correspond to the type\nof the accumulator. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "The op will blocks until sufficient (i.e., more than num_required)\ngradients have been accumulated. If the accumulator has already\naggregated more than num_required gradients, it will return its\naverage of the accumulated gradients. Also automatically increments\nthe recorded global_step in the accumulator by 1, and resets the\naggregate to 0.", + "inputs": [ + { + "description": "The handle to a SparseConditionalAccumulator.", + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "description": "Number of gradients required before we return an aggregate.", + "name": "num_required", + "type": 3 + } + ], + "outputs": [ + { + "description": "Indices of the average of the accumulated sparse gradients.", + "name": "indices", + "type": 9 + }, + { + "description": "Values of the average of the accumulated sparse gradients.", + "name": "values", + "typeAttr": "dtype" + }, + { + "description": "Shape of the average of the accumulated sparse gradients.", + "name": "shape", + "type": 9 + } + ], + "summary": "Extracts the average sparse gradient in a SparseConditionalAccumulator." 
+ } + }, + { + "name": "SparseAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "Treal", + "type": "type" + } + ], + "description": "The input `SparseTensor` objects' indices are assumed ordered in standard\nlexicographic order. If this is not the case, before this step run\n`SparseReorder` to restore index ordering.\n\nBy default, if two values sum to zero at some index, the output `SparseTensor`\nwould still include that particular location in its index, storing a zero in the\ncorresponding value slot. To override this, callers can specify `thresh`,\nindicating that if the sum has a magnitude strictly smaller than `thresh`, its\ncorresponding value and index would then not be included. In particular,\n`thresh == 0` (default) means everything is kept and actual thresholding happens\nonly for a positive value.\n\nIn the following shapes, `nnz` is the count after taking `thresh` into account.", + "inputs": [ + { + "description": "2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.", + "name": "a_indices", + "type": 9 + }, + { + "description": "1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector.", + "name": "a_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector.", + "name": "a_shape", + "type": 9 + }, + { + "description": "2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.", + "name": "b_indices", + "type": 9 + }, + { + "description": "1-D. 
The `values` of the second `SparseTensor`, size `[nnz]` Vector.", + "name": "b_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector.", + "name": "b_shape", + "type": 9 + }, + { + "description": "0-D. The magnitude threshold that determines if an output value/index\npair takes space.", + "name": "thresh", + "typeAttr": "Treal" + } + ], + "outputs": [ + { + "name": "sum_indices", + "type": 9 + }, + { + "name": "sum_values", + "typeAttr": "T" + }, + { + "name": "sum_shape", + "type": 9 + } + ], + "summary": "Adds two `SparseTensor` objects to produce another `SparseTensor`." + } + }, + { + "name": "SparseAddGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The SparseAdd op calculates A + B, where A, B, and the sum are all represented\nas `SparseTensor` objects. This op takes in the upstream gradient w.r.t.\nnon-empty values of the sum, and outputs the gradients w.r.t. the non-empty\nvalues of A and B.", + "inputs": [ + { + "description": "1-D with shape `[nnz(sum)]`. The gradient with respect to\nthe non-empty values of the sum.", + "name": "backprop_val_grad", + "typeAttr": "T" + }, + { + "description": "2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.", + "name": "a_indices", + "type": 9 + }, + { + "description": "2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.", + "name": "b_indices", + "type": 9 + }, + { + "description": "2-D. The `indices` of the sum `SparseTensor`, size\n`[nnz(sum), ndims]`.", + "name": "sum_indices", + "type": 9 + } + ], + "outputs": [ + { + "description": "1-D with shape `[nnz(A)]`. 
The gradient with respect to the\nnon-empty values of A.", + "name": "a_val_grad", + "typeAttr": "T" + }, + { + "description": "1-D with shape `[nnz(B)]`. The gradient with respect to the\nnon-empty values of B.", + "name": "b_val_grad", + "typeAttr": "T" + } + ], + "summary": "The gradient operator for the SparseAdd op." + } + }, + { + "name": "SparseApplyAdadelta", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": ": Should be from a Variable().", + "isRef": true, + "name": "accum_update", + "typeAttr": "T" + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay factor. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "description": "Constant factor. 
Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "var: Should be from a Variable()." + } + }, + { + "name": "SparseApplyAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var and accum as follows:\n$$accum += grad * grad$$\n$$var -= lr * grad * (1 / sqrt(accum))$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Learning rate. 
Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme." + } + }, + { + "name": "SparseApplyAdagradDA", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "gradient_accumulator", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "gradient_squared_accumulator", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. 
Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Training step number. Must be a scalar.", + "name": "global_step", + "type": 9 + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update entries in '*var' and '*accum' according to the proximal adagrad scheme." + } + }, + { + "name": "SparseApplyAdagradV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": true, + "name": "update_slots", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var and accum as follows:\n$$accum += grad * grad$$\n$$var -= lr * grad * (1 / sqrt(accum))$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Constant factor. 
Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' and '*accum' according to the adagrad scheme." + } + }, + { + "name": "SparseApplyCenteredRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, mg, ms, and mom tensors is\nprotected by a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "The centered RMSProp algorithm uses an estimate of the centered second moment\n(i.e., the variance) for normalization, as opposed to regular RMSProp, which\nuses the (uncentered) second moment. 
This often helps with training, but is\nslightly more expensive in terms of computation and memory.\n\nNote that in dense implementation of this algorithm, mg, ms, and mom will\nupdate even if the grad is zero, but in this sparse implementation, mg, ms,\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nmean_grad = decay * mean_grad + (1-decay) * gradient\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)\n\n$$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$\n$$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$\n$$var <- var - mom$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "mg", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "ms", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "mom", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var, ms and mom.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the centered RMSProp algorithm." 
+ } + }, + { + "name": "SparseApplyFtrl", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\n$$accum_new = accum + grad * grad$$\n$$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}} / lr * var$$\n$$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$\n$$var = (sign(linear) * l1 - linear) / quadratic\\ if\\ |linear| > l1\\ else\\ 0.0$$\n$$accum = accum_{new}$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "linear", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. 
Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme." + } + }, + { + "name": "SparseApplyFtrlV2", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "name": "multiply_linear_by_lr", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var, accum and linear as follows:\ngrad_with_shrinkage = grad + 2 * l2_shrinkage * var\naccum_new = accum + grad * grad\nlinear += grad_with_shrinkage -\n (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var\nquadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2\nvar = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0\naccum = accum_new", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "linear", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + 
"typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 shrinkage regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "name": "l2_shrinkage", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr_power", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' according to the Ftrl-proximal scheme." + } + }, + { + "name": "SparseApplyMomentum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var and accum tensors will be protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + }, + { + "default": false, + "description": "If `True`, the tensor passed to compute grad will be\nvar - lr * momentum * accum, so in the end, the var you get is actually\nvar - lr * momentum * accum.", + "name": "use_nesterov", + "type": "boolean" + } + ], + "description": "Set use_nesterov = True if you want to use Nesterov momentum.\n\nThat is for rows we have grad for, we update var and accum as follows:\n\n$$accum = accum * momentum + grad$$\n$$var -= lr * 
accum$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Momentum. Must be a scalar.", + "name": "momentum", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update relevant entries in '*var' and '*accum' according to the momentum scheme." + } + }, + { + "name": "SparseApplyProximalAdagrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, updating of the var and accum tensors will be protected by\na lock; otherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var and accum as follows:\n$$accum += grad * grad$$\n$$prox_v = var$$\n$$prox_v -= lr * grad * (1 / sqrt(accum))$$\n$$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", 
+ "isRef": true, + "name": "accum", + "typeAttr": "T" + }, + { + "description": "Learning rate. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "L1 regularization. Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Sparse update entries in '*var' and '*accum' according to FOBOS algorithm." + } + }, + { + "name": "SparseApplyProximalGradientDescent", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If True, the subtraction will be protected by a lock;\notherwise the behavior is undefined, but may exhibit less contention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "That is for rows we have grad for, we update var as follows:\n$$prox_v = var - alpha * grad$$\n$$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "L1 regularization. 
Must be a scalar.", + "name": "l1", + "typeAttr": "T" + }, + { + "description": "L2 regularization. Must be a scalar.", + "name": "l2", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var and accum.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Sparse update '*var' as FOBOS algorithm with fixed learning rate." + } + }, + { + "name": "SparseApplyRMSProp", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "If `True`, updating of the var, ms, and mom tensors is protected\nby a lock; otherwise the behavior is undefined, but may exhibit less\ncontention.", + "name": "use_locking", + "type": "boolean" + } + ], + "description": "Note that in dense implementation of this algorithm, ms and mom will\nupdate even if the grad is zero, but in this sparse implementation, ms\nand mom will not update in iterations during which the grad is zero.\n\nmean_square = decay * mean_square + (1-decay) * gradient ** 2\nDelta = learning_rate * gradient / sqrt(mean_square + epsilon)\n\n$$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$\n$$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$\n$$var <- var - mom$$", + "inputs": [ + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "var", + "typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "ms", + 
"typeAttr": "T" + }, + { + "description": "Should be from a Variable().", + "isRef": true, + "name": "mom", + "typeAttr": "T" + }, + { + "description": "Scaling factor. Must be a scalar.", + "name": "lr", + "typeAttr": "T" + }, + { + "description": "Decay rate. Must be a scalar.", + "name": "rho", + "typeAttr": "T" + }, + { + "name": "momentum", + "typeAttr": "T" + }, + { + "description": "Ridge term. Must be a scalar.", + "name": "epsilon", + "typeAttr": "T" + }, + { + "description": "The gradient.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "A vector of indices into the first dimension of var, ms and mom.", + "name": "indices", + "typeAttr": "Tindices" + } + ], + "outputs": [ + { + "description": "Same as \"var\".", + "isRef": true, + "name": "out", + "typeAttr": "T" + } + ], + "summary": "Update '*var' according to the RMSProp algorithm." + } + }, + { + "name": "SparseBincount", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "bool; Whether the kernel should count the appearance or number of occurrences.", + "name": "binary_output", + "type": "boolean" + } + ], + "description": "Outputs a vector with length `size` and the same dtype as `weights`. If\n`weights` are empty, then index `i` stores the number of times the value `i` is\ncounted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of\nthe value in `weights` at each index where the corresponding value in `arr` is\n`i`.\n\nValues in `arr` outside of the range [0, size) are ignored.", + "inputs": [ + { + "description": "2D int64 `Tensor`.", + "name": "indices", + "type": 9 + }, + { + "description": "1D int `Tensor`.", + "name": "values", + "typeAttr": "Tidx" + }, + { + "description": "1D int64 `Tensor`.", + "name": "dense_shape", + "type": 9 + }, + { + "description": "non-negative int scalar `Tensor`.", + "name": "size", + "typeAttr": "Tidx" + }, + { + "description": "is an int32, int64, float32, or float64 `Tensor` with the same\nshape as `input`, or a length-0 `Tensor`, in which case it acts as all weights\nequal to 1.", + "name": "weights", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].\nThe counts or summed weights for each value in the range [0, size).", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Counts the number of occurrences of each value in an integer array." + } + }, + { + "name": "SparseConcat", + "schema": { + "attributes": [ + { + "description": "Dimension to concatenate along. Must be in range [-rank, rank),\nwhere rank is the number of dimensions in each input `SparseTensor`.", + "name": "concat_dim", + "type": "int64" + }, + { + "minimum": 2, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "Concatenation is with respect to the dense versions of these sparse tensors.\nIt is assumed that each input is a `SparseTensor` whose elements are ordered\nalong increasing dimension number.\n\nAll inputs' shapes must match, except for the concat dimension. 
The\n`indices`, `values`, and `shapes` lists must have the same length.\n\nThe output shape is identical to the inputs', except along the concat\ndimension, where it is the sum of the inputs' sizes along that dimension.\n\nThe output elements will be resorted to preserve the sort order along\nincreasing dimension number.\n\nThis op runs in `O(M log M)` time, where `M` is the total number of non-empty\nvalues across all inputs. This is due to the need for an internal sort in\norder to concatenate efficiently across an arbitrary dimension.\n\nFor example, if `concat_dim = 1` and the inputs are\n\n sp_inputs[0]: shape = [2, 3]\n [0, 2]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n sp_inputs[1]: shape = [2, 4]\n [0, 1]: \"d\"\n [0, 2]: \"e\"\n\nthen the output will be\n\n shape = [2, 7]\n [0, 2]: \"a\"\n [0, 4]: \"d\"\n [0, 5]: \"e\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\nGraphically this is equivalent to doing\n\n [ a] concat [ d e ] = [ a d e ]\n [b c ] [ ] [b c ]", + "inputs": [ + { + "description": "2-D. Indices of each input `SparseTensor`.", + "name": "indices", + "numberAttr": "N", + "type": 9 + }, + { + "description": "1-D. Non-empty values of each `SparseTensor`.", + "name": "values", + "numberAttr": "N", + "typeAttr": "T" + }, + { + "description": "1-D. Shapes of each `SparseTensor`.", + "name": "shapes", + "numberAttr": "N", + "type": 9 + } + ], + "outputs": [ + { + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. Non-empty values of the concatenated `SparseTensor`.", + "name": "output_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the concatenated `SparseTensor`.", + "name": "output_shape", + "type": 9 + } + ], + "summary": "Concatenates a list of `SparseTensor` along the specified dimension." + } + }, + { + "name": "SparseConditionalAccumulator", + "schema": { + "attributes": [ + { + "description": "The type of the value being accumulated. 
Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "The shape of the values.", + "name": "shape", + "type": "shape" + }, + { + "default": "", + "description": "If non-empty, this accumulator is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this accumulator will be shared under the given name\nacross multiple sessions.", + "name": "shared_name", + "type": "string" + }, + { + "default": "MEAN", + "description": "Must be one of the following: `MEAN`, `SUM`.", + "name": "reduction_type", + "type": "string" + } + ], + "description": "The accumulator accepts gradients marked with local_step greater or\nequal to the most recent global_step known to the accumulator. The\naverage can be extracted from the accumulator, provided sufficient\ngradients have been accumulated. Extracting the average automatically\nresets the aggregate to 0, and increments the global_step recorded by\nthe accumulator.", + "outputs": [ + { + "description": "The handle to the accumulator.", + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "A conditional accumulator for aggregating sparse gradients." + } + }, + { + "name": "SparseCountSparseOutput", + "schema": { + "attributes": [ + { + "description": "Dtype of the input values tensor. Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": -1, + "description": "Minimum value to count. Can be set to -1 for no minimum.", + "minimum": -1, + "name": "minlength", + "type": "int64" + }, + { + "default": -1, + "description": "Maximum value to count. 
Can be set to -1 for no maximum.", + "minimum": -1, + "name": "maxlength", + "type": "int64" + }, + { + "description": "Whether to output the number of occurrences of each value or 1.", + "name": "binary_output", + "type": "boolean" + }, + { + "description": "Dtype of the output values tensor. Must be one of the following: `int32`, `int64`, `float32`, `float64`.", + "name": "output_type", + "type": "type" + } + ], + "description": " Counts the number of times each value occurs in the input.", + "inputs": [ + { + "description": "Tensor containing the indices of the sparse tensor to count.", + "name": "indices", + "type": 9 + }, + { + "description": "Tensor containing values of the sparse tensor to count.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "Tensor containing the dense shape of the sparse tensor to count.", + "name": "dense_shape", + "type": 9 + }, + { + "description": "A Tensor of the same shape as indices containing per-index weight values.\nMay also be the empty tensor if no weights are used.", + "name": "weights", + "typeAttr": "output_type" + } + ], + "outputs": [ + { + "description": "Indices tensor for the resulting sparse tensor object.", + "name": "output_indices", + "type": 9 + }, + { + "description": "Values tensor for the resulting sparse tensor object.", + "name": "output_values", + "typeAttr": "output_type" + }, + { + "description": "Shape tensor for the resulting sparse tensor object.", + "name": "output_dense_shape", + "type": 9 + } + ], + "summary": "Performs sparse-output bin counting for a sparse tensor input." 
+ } + }, + { + "name": "SparseCross", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "N", + "type": "int64" + }, + { + "description": "If true, returns the hash of the cross instead of the string.\nThis will allow us avoiding string manipulations.", + "name": "hashed_output", + "type": "boolean" + }, + { + "description": "It is used if hashed_output is true.\noutput = hashed_value%num_buckets if num_buckets > 0 else hashed_value.", + "minimum": 0, + "name": "num_buckets", + "type": "int64" + }, + { + "description": "Specify the hash_key that will be used by the `FingerprintCat64`\nfunction to combine the crosses fingerprints.", + "name": "hash_key", + "type": "int64" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "minimum": 0, + "name": "dense_types", + "type": "type[]" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "name": "out_type", + "type": "type" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "name": "internal_type", + "type": "type" + } + ], + "description": "The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\nrepresenting features of one feature column. 
It outputs a 2D `SparseTensor` with\nthe batchwise crosses of these features.\n\nFor example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\nthen the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\nif hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))", + "inputs": [ + { + "description": "2-D. Indices of each input `SparseTensor`.", + "name": "indices", + "numberAttr": "N", + "type": 9 + }, + { + "description": "1-D. values of each `SparseTensor`.", + "name": "values", + "typeListAttr": "sparse_types" + }, + { + "description": "1-D. Shapes of each `SparseTensor`.", + "name": "shapes", + "numberAttr": "N", + "type": 9 + }, + { + "description": "2-D. Columns represented by dense `Tensor`.", + "name": "dense_inputs", + "typeListAttr": "dense_types" + } + ], + "outputs": [ + { + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. Non-empty values of the concatenated or hashed\n`SparseTensor`.", + "name": "output_values", + "typeAttr": "out_type" + }, + { + "description": "1-D. Shape of the concatenated `SparseTensor`.", + "name": "output_shape", + "type": 9 + } + ], + "summary": "Generates sparse cross from a list of sparse and dense tensors." 
+ } + }, + { + "name": "SparseCrossHashed", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "N", + "type": "int64" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "minimum": 0, + "name": "dense_types", + "type": "type[]" + } + ], + "description": "The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\nrepresenting features of one feature column. It outputs a 2D `SparseTensor` with\nthe batchwise crosses of these features.\n\nFor example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\nthen the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\nif hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))", + "inputs": [ + { + "description": "2-D. Indices of each input `SparseTensor`.", + "name": "indices", + "numberAttr": "N", + "type": 9 + }, + { + "description": "1-D. values of each `SparseTensor`.", + "name": "values", + "typeListAttr": "sparse_types" + }, + { + "description": "1-D. Shapes of each `SparseTensor`.", + "name": "shapes", + "numberAttr": "N", + "type": 9 + }, + { + "description": "2-D. 
Columns represented by dense `Tensor`.", + "name": "dense_inputs", + "typeListAttr": "dense_types" + }, + { + "description": "It is used if hashed_output is true.\noutput = hashed_value%num_buckets if num_buckets > 0 else hashed_value.", + "name": "num_buckets", + "type": 9 + }, + { + "description": "boolean, if true, siphash with salt will be used instead of farmhash.", + "name": "strong_hash", + "type": 10 + }, + { + "description": "Specify the salt that will be used by the siphash function.", + "name": "salt", + "type": 9 + } + ], + "outputs": [ + { + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. Non-empty values of the concatenated or hashed\n`SparseTensor`.", + "name": "output_values", + "type": 9 + }, + { + "description": "1-D. Shape of the concatenated `SparseTensor`.", + "name": "output_shape", + "type": 9 + } + ], + "summary": "Generates sparse cross from a list of sparse and dense tensors." + } + }, + { + "name": "SparseCrossV2", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "N", + "type": "int64" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "minimum": 0, + "name": "sparse_types", + "type": "type[]" + }, + { + "description": "Must be one of the following: `int64`, `string`.", + "minimum": 0, + "name": "dense_types", + "type": "type[]" + } + ], + "description": "The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each\nrepresenting features of one feature column. 
It outputs a 2D `SparseTensor` with\nthe batchwise crosses of these features.\n\nFor example, if the inputs are\n\n inputs[0]: SparseTensor with shape = [2, 2]\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n inputs[1]: SparseTensor with shape = [2, 1]\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n\n inputs[2]: Tensor [[\"f\"], [\"g\"]]\n\nthen the output will be\n\n shape = [2, 2]\n [0, 0]: \"a_X_d_X_f\"\n [1, 0]: \"b_X_e_X_g\"\n [1, 1]: \"c_X_e_X_g\"\n\nif hashed_output=true then the output will be\n\n shape = [2, 2]\n [0, 0]: FingerprintCat64(\n Fingerprint64(\"f\"), FingerprintCat64(\n Fingerprint64(\"d\"), Fingerprint64(\"a\")))\n [1, 0]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"b\")))\n [1, 1]: FingerprintCat64(\n Fingerprint64(\"g\"), FingerprintCat64(\n Fingerprint64(\"e\"), Fingerprint64(\"c\")))", + "inputs": [ + { + "description": "2-D. Indices of each input `SparseTensor`.", + "name": "indices", + "numberAttr": "N", + "type": 9 + }, + { + "description": "1-D. values of each `SparseTensor`.", + "name": "values", + "typeListAttr": "sparse_types" + }, + { + "description": "1-D. Shapes of each `SparseTensor`.", + "name": "shapes", + "numberAttr": "N", + "type": 9 + }, + { + "description": "2-D. Columns represented by dense `Tensor`.", + "name": "dense_inputs", + "typeListAttr": "dense_types" + }, + { + "description": "string used when joining a list of string inputs, can be used as separator later.", + "name": "sep", + "type": 7 + } + ], + "outputs": [ + { + "description": "2-D. Indices of the concatenated `SparseTensor`.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. Non-empty values of the concatenated or hashed\n`SparseTensor`.", + "name": "output_values", + "type": 7 + }, + { + "description": "1-D. Shape of the concatenated `SparseTensor`.", + "name": "output_shape", + "type": 9 + } + ], + "summary": "Generates sparse cross from a list of sparse and dense tensors." 
+ } + }, + { + "name": "SparseDenseCwiseAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "(1) Broadcasts the dense side to have the same shape as the sparse side, if\n eligible;\n(2) Then, only the dense values pointed to by the indices of the SparseTensor\n participate in the cwise addition.\n\nBy these rules, the result is a logical SparseTensor with exactly the same\nindices and shape, but possibly with different non-zero values. The output of\nthis Op is the resultant non-zero values.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "sp_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `sp_indices`.", + "name": "sp_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "sp_shape", + "type": 9 + }, + { + "description": "`R`-D. The dense Tensor operand.", + "name": "dense", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D. 
The `N` values that are operated on.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adds up a SparseTensor and a dense Tensor, using these special rules:" + } + }, + { + "name": "SparseDenseCwiseDiv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "*Limitation*: this Op only broadcasts the dense side to the sparse side, but not\nthe other direction.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "sp_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `sp_indices`.", + "name": "sp_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "sp_shape", + "type": 9 + }, + { + "description": "`R`-D. The dense Tensor operand.", + "name": "dense", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D. The `N` values that are operated on.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Component-wise divides a SparseTensor by a dense Tensor." 
+ } + }, + { + "name": "SparseDenseCwiseMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "The output locations corresponding to the implicitly zero elements in the sparse\ntensor will be zero (i.e., will not take up storage space), regardless of the\ncontents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).\n\n*Limitation*: this Op only broadcasts the dense side to the sparse side, but not\nthe other direction.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "sp_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `sp_indices`.", + "name": "sp_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "sp_shape", + "type": 9 + }, + { + "description": "`R`-D. The dense Tensor operand.", + "name": "dense", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D. The `N` values that are operated on.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Component-wise multiplies a SparseTensor by a dense Tensor." + } + }, + { + "name": "SparseFillEmptyRows", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "The input `SparseTensor` is represented via the tuple of inputs\n(`indices`, `values`, `dense_shape`). 
The output `SparseTensor` has the\nsame `dense_shape` but with indices `output_indices` and values\n`output_values`.\n\nThis op inserts a single entry for every row that doesn't have any values.\nThe index is created as `[row, 0, ..., 0]` and the inserted value\nis `default_value`.\n\nFor example, suppose `sp_input` has shape `[5, 6]` and non-empty values:\n\n [0, 1]: a\n [0, 3]: b\n [2, 0]: c\n [3, 1]: d\n\nRows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:\n\n [0, 1]: a\n [0, 3]: b\n [1, 0]: default_value\n [2, 0]: c\n [3, 1]: d\n [4, 0]: default_value\n\nThe output `SparseTensor` will be in row-major order and will have the\nsame shape as the input.\n\nThis op also returns an indicator vector shaped `[dense_shape[0]]` such that\n\n empty_row_indicator[i] = True iff row i was an empty row.\n\nAnd a reverse index map vector shaped `[indices.shape[0]]` that is used during\nbackpropagation,\n\n reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]", + "inputs": [ + { + "description": "2-D. the indices of the sparse tensor.", + "name": "indices", + "type": 9 + }, + { + "description": "1-D. the values of the sparse tensor.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "1-D. the shape of the sparse tensor.", + "name": "dense_shape", + "type": 9 + }, + { + "description": "0-D. default value to insert into location `[row, 0, ..., 0]`\n for rows missing from the input sparse tensor.\noutput indices: 2-D. the indices of the filled sparse tensor.", + "name": "default_value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. the values of the filled sparse tensor.", + "name": "output_values", + "typeAttr": "T" + }, + { + "description": "1-D. whether the dense row was missing in the\ninput sparse tensor.", + "name": "empty_row_indicator", + "type": 10 + }, + { + "description": "1-D. 
a map from the input indices to the output indices.", + "name": "reverse_index_map", + "type": 9 + } + ], + "summary": "Fills empty rows in the input 2-D `SparseTensor` with a default value." + } + }, + { + "name": "SparseFillEmptyRowsGrad", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Takes vectors reverse_index_map, shaped `[N]`, and grad_values,\nshaped `[N_full]`, where `N_full >= N` and copies data into either\n`d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and\n`d_default_value` is a scalar.\n\n d_values[j] = grad_values[reverse_index_map[j]]\n d_default_value = sum_{k : 0 .. N_full - 1} (\n grad_values[k] * 1{k not in reverse_index_map})", + "inputs": [ + { + "description": "1-D. The reverse index map from SparseFillEmptyRows.", + "name": "reverse_index_map", + "type": 9 + }, + { + "description": "1-D. The gradients from backprop.", + "name": "grad_values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D. The backprop into values.", + "name": "d_values", + "typeAttr": "T" + }, + { + "description": "0-D. The backprop into default_value.", + "name": "d_default_value", + "typeAttr": "T" + } + ], + "summary": "The gradient of SparseFillEmptyRows." 
+ } + }, + { + "name": "SparseMatMul", + "schema": { + "attributes": [ + { + "default": false, + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "name": "transpose_b", + "type": "boolean" + }, + { + "default": false, + "name": "a_is_sparse", + "type": "boolean" + }, + { + "default": false, + "name": "b_is_sparse", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `bfloat16`.", + "name": "Ta", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `bfloat16`.", + "name": "Tb", + "type": "type" + } + ], + "description": "The inputs must be two-dimensional matrices and the inner dimension of \"a\" must\nmatch the outer dimension of \"b\". Both \"a\" and \"b\" must be `Tensor`s not\n`SparseTensor`s. This op is optimized for the case where at least one of \"a\" or\n\"b\" is sparse, in the sense that they have a large proportion of zero values.\nThe breakeven for using this versus a dense matrix multiply on one platform was\n30% zero values in the sparse matrix.\n\nThe gradient computation of this operation will only take advantage of sparsity\nin the input gradient when that gradient comes from a Relu.", + "inputs": [ + { + "name": "a", + "typeAttr": "Ta" + }, + { + "name": "b", + "typeAttr": "Tb" + } + ], + "outputs": [ + { + "name": "product", + "type": 1 + } + ], + "summary": "Multiply matrix \"a\" by matrix \"b\"." 
+ } + }, + { + "name": "SparseMatrixAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not\ncurrently defined (TensorFlow will return zeros for these entries).", + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "a", + "type": 21 + }, + { + "description": "A CSRSparseMatrix.", + "name": "b", + "type": 21 + }, + { + "description": "A constant scalar.", + "name": "alpha", + "typeAttr": "T" + }, + { + "description": "A constant scalar.", + "name": "beta", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "c", + "type": 21 + } + ], + "summary": "Sparse addition of two CSR matrices, C = alpha * A + beta * B." + } + }, + { + "name": "SparseMatrixMatMul", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "Indicates whether `a` should be transposed.", + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "description": "Indicates whether `b` should be transposed.", + "name": "transpose_b", + "type": "boolean" + }, + { + "default": false, + "description": "Indicates whether `a` should be conjugate-transposed.", + "name": "adjoint_a", + "type": "boolean" + }, + { + "default": false, + "description": "Indicates whether `b` should be conjugate-transposed.", + "name": "adjoint_b", + "type": "boolean" + }, + { + "default": false, + "description": "Transposes the product of `a` and `b`.", + "name": "transpose_output", + "type": "boolean" + }, + { + "default": false, + "description": "Conjugates the product of `a` and `b`.", + "name": "conjugate_output", + "type": "boolean" + } + ], + "description": "Returns a dense matrix.\nFor inputs A and B, where A is CSR and B is dense; this op returns a dense C;\n\nIf 
transpose_output is false, returns:\n```\n C = A . B\n```\n\nIf transpose_output is `true`, returns:\n```\n C = transpose(A . B) = transpose(B) . transpose(A)\n```\nwhere the transposition is performed along the two innermost (matrix)\ndimensions.\n\nIf conjugate_output is `true`, returns:\n```\n C = conjugate(A . B) = conjugate(A) . conjugate(B)\n```\n\nIf both conjugate_output and transpose_output are `true`, returns:\n```\n C = conjugate(transpose(A . B)) = conjugate(transpose(B)) .\n conjugate(transpose(A))\n```", + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "a", + "type": 21 + }, + { + "description": "A dense tensor.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A dense output tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Matrix-multiplies a sparse matrix with a dense matrix." + } + }, + { + "name": "SparseMatrixMul", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Returns a sparse matrix.\n\nThe dense tensor `b` may be either a scalar; otherwise `a` must be a rank-3\n`SparseMatrix`; in this case `b` must be shaped `[batch_size, 1, 1]` and the\nmultiply operation broadcasts.\n\n**NOTE** even if `b` is zero, the sparsity structure of the output does not\nchange.", + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "a", + "type": 21 + }, + { + "description": "A dense tensor.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A dense output tensor.", + "name": "output", + "type": 21 + } + ], + "summary": "Element-wise multiplication of a sparse matrix with a dense tensor." 
+ } + }, + { + "name": "SparseMatrixNNZ", + "schema": { + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "sparse_matrix", + "type": 21 + } + ], + "outputs": [ + { + "description": "The number of nonzeroes of `sparse_matrix`.", + "name": "nnz", + "type": 3 + } + ], + "summary": "Returns the number of nonzeroes of `sparse_matrix`." + } + }, + { + "name": "SparseMatrixOrderingAMD", + "schema": { + "description": "Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix.\n\nThe returned permutation may be used to permute the rows and columns of the\ngiven sparse matrix. This typically results in permuted sparse matrix's sparse\nCholesky (or other decompositions) in having fewer zero fill-in compared to\ndecomposition of the original matrix.\n\nThe input sparse matrix may have rank 2 or rank 3. The output Tensor,\nrepresenting would then have rank 1 or 2 respectively, with the same batch\nshape as the input.\n\nEach component of the input sparse matrix must represent a square symmetric\nmatrix; only the lower triangular part of the matrix is read. The values of the\nsparse matrix does not affect the returned permutation, only the sparsity\npattern of the sparse matrix is used. Hence, a single AMD ordering may be\nreused for the Cholesky decompositions of sparse matrices with the same sparsity\npattern but with possibly different values.\n\nEach batch component of the output permutation represents a permutation of `N`\nelements, where the input sparse matrix components each have `N` rows. That is,\nthe component contains each of the integers `{0, .. N-1}` exactly once. 
The\n`i`th element represents the row index that the `i`th row maps to.\n\nUsage example:\n\n```python\n from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops\n\n a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])\n a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)\n a_dense_shape = [4, 4]\n\n with tf.Session() as sess:\n # Define (COO format) SparseTensor over Numpy array.\n a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)\n\n # Convert SparseTensors to CSR SparseMatrix.\n a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n a_st.indices, a_st.values, a_st.dense_shape)\n\n # Obtain the AMD Ordering for the CSR SparseMatrix.\n ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(sparse_matrix)\n\n ordering_amd_value = sess.run(ordering_amd)\n```\n\n`ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.\n\ninput: A `CSRSparseMatrix`.", + "inputs": [ + { + "description": "A `CSRSparseMatrix`.", + "name": "input", + "type": 21 + } + ], + "outputs": [ + { + "description": "The Approximate Minimum Degree (AMD) ordering of `input`.", + "name": "output", + "type": 3 + } + ], + "summary": "Computes the Approximate Minimum Degree (AMD) ordering of `input`." 
+ } + }, + { + "name": "SparseMatrixSoftmax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "type", + "type": "type" + } + ], + "description": "Calculate the softmax of the innermost dimensions of a SparseMatrix.\n\nMissing values are treated as `-inf` (i.e., logits of zero probability); and\nthe output has the same sparsity structure as the input (though missing values\nin the output may now be treated as having probability zero).", + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "logits", + "type": 21 + } + ], + "outputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "softmax", + "type": 21 + } + ], + "summary": "Calculates the softmax of a CSRSparseMatrix." + } + }, + { + "name": "SparseMatrixSoftmaxGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "type", + "type": "type" + } + ], + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "softmax", + "type": 21 + }, + { + "description": "The gradient of `softmax`.", + "name": "grad_softmax", + "type": 21 + } + ], + "outputs": [ + { + "description": "The output gradient.", + "name": "gradient", + "type": 21 + } + ], + "summary": "Calculates the gradient of the SparseMatrixSoftmax op." + } + }, + { + "name": "SparseMatrixSparseCholesky", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "type", + "type": "type" + } + ], + "description": "Computes the Sparse Cholesky decomposition of a sparse matrix, with the given\nfill-in reducing permutation.\n\nThe input sparse matrix and the fill-in reducing permutation `permutation` must\nhave compatible shapes. If the sparse matrix has rank 3; with the batch\ndimension `B`, then the `permutation` must be of rank 2; with the same batch\ndimension `B`. 
There is no support for broadcasting.\n\nFurthermore, each component vector of `permutation` must be of length `N`,\ncontaining each of the integers {0, 1, ..., N - 1} exactly once, where `N` is\nthe number of rows of each component of the sparse matrix.\n\nEach component of the input sparse matrix must represent a symmetric positive\ndefinite (SPD) matrix; although only the lower triangular part of the matrix is\nread. If any individual component is not SPD, then an InvalidArgument error is\nthrown.\n\nThe returned sparse matrix has the same dense shape as the input sparse matrix.\nFor each component `A` of the input sparse matrix, the corresponding output\nsparse matrix represents `L`, the lower triangular Cholesky factor satisfying\nthe following identity:\n\n```\n A = L * Lt\n```\n\nwhere Lt denotes the transpose of L (or its conjugate transpose, if `type` is\n`complex64` or `complex128`).\n\nThe `type` parameter denotes the type of the matrix elements. The supported\ntypes are: `float32`, `float64`, `complex64` and `complex128`.\n\nUsage example:\n\n```python\n from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops\n\n a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])\n a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)\n a_dense_shape = [4, 4]\n\n with tf.Session() as sess:\n # Define (COO format) SparseTensor over Numpy array.\n a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)\n\n # Convert SparseTensors to CSR SparseMatrix.\n a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n a_st.indices, a_st.values, a_st.dense_shape)\n\n # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero\n # fill-in (number of structural non-zeros in the sparse Cholesky factor).\n ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(sparse_matrix)\n cholesky_sparse_matrices = (\n sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(\n sparse_matrix, ordering_amd, type=tf.float32))\n\n # 
Convert the CSRSparseMatrix Cholesky factor to a dense Tensor\n dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(\n cholesky_sparse_matrices, tf.float32)\n\n # Evaluate the dense Tensor value.\n dense_cholesky_value = sess.run(dense_cholesky)\n```\n\n`dense_cholesky_value` stores the dense Cholesky factor:\n\n```\n [[ 1. 0. 0. 0.]\n [ 0. 1.41 0. 0.]\n [ 0. 0.70 1.58 0.]\n [ 0. 0. 0. 2.]]\n```\n\n\ninput: A `CSRSparseMatrix`.\npermutation: A `Tensor`.\ntype: The type of `input`.", + "inputs": [ + { + "description": "A `CSRSparseMatrix`.", + "name": "input", + "type": 21 + }, + { + "description": "A fill-in reducing permutation matrix.", + "name": "permutation", + "type": 3 + } + ], + "outputs": [ + { + "description": "The sparse Cholesky decompsition of `input`.", + "name": "output", + "type": 21 + } + ], + "summary": "Computes the sparse Cholesky decomposition of `input`." + } + }, + { + "name": "SparseMatrixSparseMatMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "type", + "type": "type" + }, + { + "default": false, + "description": "Indicates whether `a` should be transposed.", + "name": "transpose_a", + "type": "boolean" + }, + { + "default": false, + "description": "Indicates whether `b` should be transposed.", + "name": "transpose_b", + "type": "boolean" + }, + { + "default": false, + "description": "Indicates whether `a` should be conjugate-transposed.", + "name": "adjoint_a", + "type": "boolean" + }, + { + "default": false, + "description": "Indicates whether `b` should be conjugate-transposed.", + "name": "adjoint_b", + "type": "boolean" + } + ], + "description": "Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix\n`b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or\nadjointed.\n\nEach matrix may be transposed or adjointed (conjugated and transposed)\naccording to the Boolean parameters 
`transpose_a`, `adjoint_a`, `transpose_b`\nand `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True.\nSimilarly, at most one of `transpose_b` or `adjoint_b` may be True.\n\nThe inputs must have compatible shapes. That is, the inner dimension of `a`\nmust be equal to the outer dimension of `b`. This requirement is adjusted\naccording to whether either `a` or `b` is transposed or adjointed.\n\nThe `type` parameter denotes the type of the matrix elements. Both `a` and `b`\nmust have the same type. The supported types are: `float32`, `float64`,\n`complex64` and `complex128`.\n\nBoth `a` and `b` must have the same rank. Broadcasting is not supported. If they\nhave rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the\nsame dense shape.\n\nThe sparse matrix product may have numeric (non-structural) zeros.\nTODO(anudhyan): Consider adding a boolean attribute to control whether to prune\nzeros.\n\nUsage example:\n\n```python\n from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops\n\n a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])\n a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)\n a_dense_shape = [4, 5]\n\n b_indices = np.array([[0, 0], [3, 0], [3, 1]])\n b_values = np.array([2.0, 7.0, 8.0], np.float32)\n b_dense_shape = [5, 3]\n\n with tf.Session() as sess:\n # Define (COO format) Sparse Tensors over Numpy arrays\n a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)\n b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape)\n\n # Convert SparseTensors to CSR SparseMatrix\n a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n a_st.indices, a_st.values, a_st.dense_shape)\n b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(\n b_st.indices, b_st.values, b_st.dense_shape)\n\n # Compute the CSR SparseMatrix matrix multiplication\n c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(\n a=a_sm, b=b_sm, type=tf.float32)\n\n # Convert the CSR SparseMatrix 
product to a dense Tensor\n c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(\n c_sm, tf.float32)\n # Evaluate the dense Tensor value\n c_sm_dense_value = sess.run(c_sm_dense)\n```\n\n`c_sm_dense_value` stores the dense matrix product:\n\n```\n [[ 2. 0. 0.]\n [ 0. 0. 0.]\n [ 35. 40. 0.]\n [ -4. 0. 0.]]\n```\n\na: A `CSRSparseMatrix`.\nb: A `CSRSparseMatrix` with the same type and rank as `a`.\ntype: The type of both `a` and `b`.\ntranspose_a: If True, `a` transposed before multiplication.\ntranspose_b: If True, `b` transposed before multiplication.\nadjoint_a: If True, `a` adjointed before multiplication.\nadjoint_b: If True, `b` adjointed before multiplication.", + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "a", + "type": 21 + }, + { + "description": "A CSRSparseMatrix.", + "name": "b", + "type": 21 + } + ], + "outputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "c", + "type": 21 + } + ], + "summary": "Sparse-matrix-multiplies two CSR matrices `a` and `b`." + } + }, + { + "name": "SparseMatrixTranspose", + "schema": { + "attributes": [ + { + "default": false, + "description": "Indicates whether `input` should be conjugated.", + "name": "conjugate", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "type", + "type": "type" + } + ], + "description": "Transposes the inner (matrix) dimensions of a SparseMatrix and optionally\nconjugates its values.", + "inputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "input", + "type": 21 + } + ], + "outputs": [ + { + "description": "A CSRSparseMatrix.", + "name": "output", + "type": 21 + } + ], + "summary": "Transposes the inner (matrix) dimensions of a CSRSparseMatrix." 
+ } + }, + { + "name": "SparseMatrixZeros", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "type", + "type": "type" + } + ], + "inputs": [ + { + "description": "The desired matrix shape.", + "name": "dense_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "An empty CSR matrix with shape `dense_shape`.", + "name": "sparse_matrix", + "type": 21 + } + ], + "summary": "Creates an all-zeros CSRSparseMatrix with shape `dense_shape`." + } + }, + { + "name": "SparseReduceMax", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`\ninstead of a sparse one.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "input_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "name": "input_values", + "typeAttr": "T" + }, + { + "description": "1-D. 
Shape of the input SparseTensor.", + "name": "input_shape", + "type": 9 + }, + { + "description": "1-D. Length-`K` vector containing the reduction axes.", + "name": "reduction_axes", + "type": 3 + } + ], + "outputs": [ + { + "description": "`R-K`-D. The reduced Tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the max of elements across dimensions of a SparseTensor." + } + }, + { + "name": "SparseReduceMaxSparse", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a\nSparseTensor.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "input_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "name": "input_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "input_shape", + "type": 9 + }, + { + "description": "1-D. 
Length-`K` vector containing the reduction axes.", + "name": "reduction_axes", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_indices", + "type": 9 + }, + { + "name": "output_values", + "typeAttr": "T" + }, + { + "name": "output_shape", + "type": 9 + } + ], + "summary": "Computes the max of elements across dimensions of a SparseTensor." + } + }, + { + "name": "SparseReduceSum", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`\ninstead of a sparse one.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "input_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "name": "input_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "input_shape", + "type": 9 + }, + { + "description": "1-D. 
Length-`K` vector containing the reduction axes.", + "name": "reduction_axes", + "type": 3 + } + ], + "outputs": [ + { + "description": "`R-K`-D. The reduced Tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum of elements across dimensions of a SparseTensor." + } + }, + { + "name": "SparseReduceSumSparse", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "This Op takes a SparseTensor and is the sparse counterpart to\n`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a\nSparseTensor.\n\nReduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\nwith length 1.\n\nIf `reduction_axes` has no entries, all dimensions are reduced, and a tensor\nwith a single element is returned. Additionally, the axes can be negative,\nwhich are interpreted according to the indexing rules in Python.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "input_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "name": "input_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "input_shape", + "type": 9 + }, + { + "description": "1-D. 
Length-`K` vector containing the reduction axes.", + "name": "reduction_axes", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_indices", + "type": 9 + }, + { + "name": "output_values", + "typeAttr": "T" + }, + { + "name": "output_shape", + "type": 9 + } + ], + "summary": "Computes the sum of elements across dimensions of a SparseTensor." + } + }, + { + "name": "SparseReorder", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Note that by convention, all sparse ops preserve the canonical ordering along\nincreasing dimension number. The only time ordering can be violated is during\nmanual manipulation of the indices and values vectors to add entries.\n\nReordering does not affect the shape of the SparseTensor.\n\nIf the tensor has rank `R` and `N` non-empty values, `input_indices` has\nshape `[N, R]`, input_values has length `N`, and input_shape has length `R`.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, possibly not in canonical ordering.", + "name": "input_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `input_indices`.", + "name": "input_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "input_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "2-D. `N x R` matrix with the same indices as input_indices, but\nin canonical row-major ordering.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `output_indices`.", + "name": "output_values", + "typeAttr": "T" + } + ], + "summary": "Reorders a SparseTensor into the canonical, row-major ordering." + } + }, + { + "name": "SparseReshape", + "schema": { + "description": "This operation has the same semantics as reshape on the represented dense\ntensor. 
The `input_indices` are recomputed based on the requested `new_shape`.\n\nIf one component of `new_shape` is the special value -1, the size of that\ndimension is computed so that the total dense size remains constant. At\nmost one component of `new_shape` can be -1. The number of dense elements\nimplied by `new_shape` must be the same as the number of dense elements\noriginally implied by `input_shape`.\n\nReshaping does not affect the order of values in the SparseTensor.\n\nIf the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`\nhas length `R_out`, then `input_indices` has shape `[N, R_in]`,\n`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and\n`output_shape` has length `R_out`.", + "inputs": [ + { + "description": "2-D. `N x R_in` matrix with the indices of non-empty values in a\nSparseTensor.", + "name": "input_indices", + "type": 9 + }, + { + "description": "1-D. `R_in` vector with the input SparseTensor's dense shape.", + "name": "input_shape", + "type": 9 + }, + { + "description": "1-D. `R_out` vector with the requested new dense shape.", + "name": "new_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "2-D. `N x R_out` matrix with the updated indices of non-empty\nvalues in the output SparseTensor.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. `R_out` vector with the full dense shape of the output\nSparseTensor. This is the same as `new_shape` but with any -1 dimensions\nfilled in.", + "name": "output_shape", + "type": 9 + } + ], + "summary": "Reshapes a SparseTensor to represent values in a new dense shape." 
+ } + }, + { + "name": "SparseSegmentMean", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "See `tf.sparse.segment_sum` for usage examples.\n\nLike `SegmentMean`, but `segment_ids` can have rank less than `data`'s first\ndimension, selecting a subset of dimension 0, specified by `indices`.", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the mean along sparse segments of a tensor." 
+ } + }, + { + "name": "SparseSegmentMeanGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is output_dim0.", + "inputs": [ + { + "description": "gradient propagated to the SparseSegmentMean op.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "indices passed to the corresponding SparseSegmentMean op.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "segment_ids passed to the corresponding SparseSegmentMean op.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + }, + { + "description": "dimension 0 of \"data\" passed to SparseSegmentMean op.", + "name": "output_dim0", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients for SparseSegmentMean." 
+ } + }, + { + "name": "SparseSegmentMeanWithNumSegments", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is\nmissing, the `output` tensor at that position will be zeroed.\n\nRead\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + }, + { + "description": "Should equal the number of distinct segment IDs.", + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which has size\n`num_segments`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the mean along sparse segments of a tensor." 
+ } + }, + { + "name": "SparseSegmentSqrtN", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "N is the size of the segment being reduced.\n\nSee `tf.sparse.segment_sum` for usage examples.\n", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum along sparse segments of a tensor divided by the sqrt of N." 
+ } + }, + { + "name": "SparseSegmentSqrtNGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "Returns tensor \"output\" with same shape as grad, except for dimension 0 whose\nvalue is output_dim0.", + "inputs": [ + { + "description": "gradient propagated to the SparseSegmentSqrtN op.", + "name": "grad", + "typeAttr": "T" + }, + { + "description": "indices passed to the corresponding SparseSegmentSqrtN op.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "segment_ids passed to the corresponding SparseSegmentSqrtN op.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + }, + { + "description": "dimension 0 of \"data\" passed to SparseSegmentSqrtN op.", + "name": "output_dim0", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes gradients for SparseSegmentSqrtN." 
+ } + }, + { + "name": "SparseSegmentSqrtNWithNumSegments", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "N is the size of the segment being reduced.\n\nLike `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is\nmissing, the `output` tensor at that position will be zeroed.\n\nRead\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + }, + { + "description": "Should equal the number of distinct segment IDs.", + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum along sparse segments of a tensor divided by the sqrt of N." 
+ } + }, + { + "name": "SparseSegmentSum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nLike `SegmentSum`, but `segment_ids` can have rank less than `data`'s first\ndimension, selecting a subset of dimension 0, specified by `indices`.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\n# Select two rows, one segment.\ntf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))\n# => [[0 0 0 0]]\n\n# Select two rows, two segment.\ntf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))\n# => [[ 1 2 3 4]\n# [-1 -2 -3 -4]]\n\n# Select all rows, two segments.\ntf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))\n# => [[0 0 0 0]\n# [5 6 7 8]]\n\n# Which is equivalent to:\ntf.segment_sum(c, tf.constant([0, 0, 1]))\n```", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor. Has same rank as `segment_ids`.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "A 1-D tensor. 
Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `k`, the number of segments.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum along sparse segments of a tensor." + } + }, + { + "name": "SparseSegmentSumWithNumSegments", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsegmentids", + "type": "type" + } + ], + "description": "Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is\nmissing, the `output` tensor at that position will be zeroed.\n\nRead\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)\nfor an explanation of segments.\n\nFor example:\n\n```python\nc = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])\n\ntf.sparse_segment_sum_with_num_segments(\n c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)\n# => [[0 0 0 0]\n# [0 0 0 0]\n# [0 0 0 0]]\n\ntf.sparse_segment_sum_with_num_segments(c,\n tf.constant([0, 1]),\n tf.constant([0, 2],\n num_segments=4))\n# => [[ 1 2 3 4]\n# [ 0 0 0 0]\n# [-1 -2 -3 -4]\n# [ 0 0 0 0]]\n```", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A 1-D tensor. 
Has same rank as `segment_ids`.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "A 1-D tensor. Values should be sorted and can be repeated.", + "name": "segment_ids", + "typeAttr": "Tsegmentids" + }, + { + "description": "Should equal the number of distinct segment IDs.", + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for dimension 0 which\nhas size `num_segments`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum along sparse segments of a tensor." + } + }, + { + "name": "SparseSlice", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "For example, if the input is\n\n input_tensor = shape = [2, 7]\n [ a d e ]\n [b c ]\n\nGraphically the output tensors are:\n\n sparse_slice([0, 0], [2, 4]) = shape = [2, 4]\n [ a ]\n [b c ]\n\n sparse_slice([0, 4], [2, 3]) = shape = [2, 3]\n [ d e ]\n [ ]", + "inputs": [ + { + "description": "2-D tensor represents the indices of the sparse tensor.", + "name": "indices", + "type": 9 + }, + { + "description": "1-D tensor represents the values of the sparse tensor.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "1-D. tensor represents the shape of the sparse tensor.", + "name": "shape", + "type": 9 + }, + { + "description": "1-D. tensor represents the start of the slice.", + "name": "start", + "type": 9 + }, + { + "description": "1-D. 
tensor represents the size of the slice.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors.", + "name": "size", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "type": 9 + }, + { + "description": "A list of 1-D tensors represents the values of the output sparse\ntensors.", + "name": "output_values", + "typeAttr": "T" + }, + { + "description": "A list of 1-D tensors represents the shape of the output sparse\ntensors.", + "name": "output_shape", + "type": 9 + } + ], + "summary": "Slice a `SparseTensor` based on the `start` and `size`." + } + }, + { + "name": "SparseSliceGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "This op takes in the upstream gradient w.r.t. non-empty values of\nthe sliced `SparseTensor`, and outputs the gradients w.r.t.\nthe non-empty values of input `SparseTensor`.", + "inputs": [ + { + "description": "1-D. The gradient with respect to\nthe non-empty values of the sliced `SparseTensor`.", + "name": "backprop_val_grad", + "typeAttr": "T" + }, + { + "description": "2-D. The `indices` of the input `SparseTensor`.", + "name": "input_indices", + "type": 9 + }, + { + "description": "1-D. tensor represents the start of the slice.", + "name": "input_start", + "type": 9 + }, + { + "description": "2-D. The `indices` of the sliced `SparseTensor`.", + "name": "output_indices", + "type": 9 + } + ], + "outputs": [ + { + "description": "1-D. The gradient with respect to the non-empty values of input `SparseTensor`.", + "name": "val_grad", + "typeAttr": "T" + } + ], + "summary": "The gradient operator for the SparseSlice op." 
+ } + }, + { + "name": "SparseSoftmax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`\n(where `N >= 2`), and with indices sorted in the canonical lexicographic order.\n\nThis op is equivalent to applying the normal `tf.nn.softmax()` to each innermost\nlogical submatrix with shape `[B, C]`, but with the catch that *the implicitly\nzero elements do not participate*. Specifically, the algorithm is equivalent\nto the following:\n\n (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix\n with shape `[B, C]`, along the size-C dimension;\n (2) Masks out the original implicitly-zero locations;\n (3) Renormalizes the remaining elements.\n\nHence, the `SparseTensor` result has exactly the same non-zero indices and\nshape.", + "inputs": [ + { + "description": "2-D. `NNZ x R` matrix with the indices of non-empty values in a\nSparseTensor, in canonical ordering.", + "name": "sp_indices", + "type": 9 + }, + { + "description": "1-D. `NNZ` non-empty values corresponding to `sp_indices`.", + "name": "sp_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "sp_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "1-D. The `NNZ` values for the result `SparseTensor`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Applies softmax to a batched N-D `SparseTensor`." 
+ } + }, + { + "name": "SparseSoftmaxCrossEntropyWithLogits", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tlabels", + "type": "type" + } + ], + "description": "Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept\na matrix of label probabilities, but rather a single label per row\nof features. This label is considered to have probability 1.0 for the\ngiven row.\n\nInputs are the logits, not probabilities.", + "inputs": [ + { + "description": "batch_size x num_classes matrix", + "name": "features", + "typeAttr": "T" + }, + { + "description": "batch_size vector with values in [0, num_classes).\nThis is the label for the given minibatch entry.", + "name": "labels", + "typeAttr": "Tlabels" + } + ], + "outputs": [ + { + "description": "Per example loss (batch_size vector).", + "name": "loss", + "typeAttr": "T" + }, + { + "description": "backpropagated gradients (batch_size x num_classes matrix).", + "name": "backprop", + "typeAttr": "T" + } + ], + "summary": "Computes softmax cross entropy cost and gradients to backpropagate." + } + }, + { + "name": "SparseSparseMaximum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "Assumes the two SparseTensors have the same shape, i.e., no broadcasting.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering.", + "name": "a_indices", + "type": 9 + }, + { + "description": "1-D. 
`N` non-empty values corresponding to `a_indices`.", + "name": "a_values", + "typeAttr": "T" + }, + { + "description": "1-D. Shape of the input SparseTensor.", + "name": "a_shape", + "type": 9 + }, + { + "description": "counterpart to `a_indices` for the other operand.", + "name": "b_indices", + "type": 9 + }, + { + "description": "counterpart to `a_values` for the other operand; must be of the same dtype.", + "name": "b_values", + "typeAttr": "T" + }, + { + "description": "counterpart to `a_shape` for the other operand; the two shapes must be equal.", + "name": "b_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "2-D. The indices of the output SparseTensor.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. The values of the output SparseTensor.", + "name": "output_values", + "typeAttr": "T" + } + ], + "summary": "Returns the element-wise max of two SparseTensors." + } + }, + { + "name": "SparseSparseMinimum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "Assumes the two SparseTensors have the same shape, i.e., no broadcasting.", + "inputs": [ + { + "description": "2-D. `N x R` matrix with the indices of non-empty values in a\nSparseTensor, in the canonical lexicographic ordering.", + "name": "a_indices", + "type": 9 + }, + { + "description": "1-D. `N` non-empty values corresponding to `a_indices`.", + "name": "a_values", + "typeAttr": "T" + }, + { + "description": "1-D. 
Shape of the input SparseTensor.", + "name": "a_shape", + "type": 9 + }, + { + "description": "counterpart to `a_indices` for the other operand.", + "name": "b_indices", + "type": 9 + }, + { + "description": "counterpart to `a_values` for the other operand; must be of the same dtype.", + "name": "b_values", + "typeAttr": "T" + }, + { + "description": "counterpart to `a_shape` for the other operand; the two shapes must be equal.", + "name": "b_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "2-D. The indices of the output SparseTensor.", + "name": "output_indices", + "type": 9 + }, + { + "description": "1-D. The values of the output SparseTensor.", + "name": "output_values", + "typeAttr": "T" + } + ], + "summary": "Returns the element-wise min of two SparseTensors." + } + }, + { + "name": "SparseSplit", + "schema": { + "attributes": [ + { + "description": "The number of ways to split.", + "minimum": 1, + "name": "num_split", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices\n`[0 : shape[split_dim] % num_split]` gets one extra dimension.\nFor example, if `split_dim = 1` and `num_split = 2` and the input is\n\n input_tensor = shape = [2, 7]\n [ a d e ]\n [b c ]\n\nGraphically the output tensors are:\n\n output_tensor[0] = shape = [2, 4]\n [ a ]\n [b c ]\n\n output_tensor[1] = shape = [2, 3]\n [ d e ]\n [ ]", + "inputs": [ + { + "description": "0-D. The dimension along which to split. Must be in the range\n`[0, rank(shape))`.", + "name": "split_dim", + "type": 9 + }, + { + "description": "2-D tensor represents the indices of the sparse tensor.", + "name": "indices", + "type": 9 + }, + { + "description": "1-D tensor represents the values of the sparse tensor.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "1-D. 
tensor represents the shape of the sparse tensor.\noutput indices: A list of 1-D tensors represents the indices of the output\nsparse tensors.", + "name": "shape", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_indices", + "numberAttr": "num_split", + "type": 9 + }, + { + "description": "A list of 1-D tensors represents the values of the output sparse\ntensors.", + "name": "output_values", + "numberAttr": "num_split", + "typeAttr": "T" + }, + { + "description": "A list of 1-D tensors represents the shape of the output sparse\ntensors.", + "name": "output_shape", + "numberAttr": "num_split", + "type": 9 + } + ], + "summary": "Split a `SparseTensor` into `num_split` tensors along one dimension." + } + }, + { + "name": "SparseTensorDenseAdd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This Op does not require `a_indices` be sorted in standard lexicographic order.", + "inputs": [ + { + "description": "2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.", + "name": "a_indices", + "typeAttr": "Tindices" + }, + { + "description": "1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.", + "name": "a_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.", + "name": "a_shape", + "typeAttr": "Tindices" + }, + { + "description": "`ndims`-D Tensor. With shape `a_shape`.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`." 
+ } + }, + { + "name": "SparseTensorDenseMatMul", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": false, + "description": "Use the adjoint of A in the matrix multiply. If A is complex, this\nis transpose(conj(A)). Otherwise it's transpose(A).", + "name": "adjoint_a", + "type": "boolean" + }, + { + "default": false, + "description": "Use the adjoint of B in the matrix multiply. If B is complex, this\nis transpose(conj(B)). Otherwise it's transpose(B).", + "name": "adjoint_b", + "type": "boolean" + } + ], + "description": "No validity checking is performed on the indices of A. However, the following\ninput format is recommended for optimal behavior:\n\nif adjoint_a == false:\n A should be sorted in lexicographically increasing order. Use SparseReorder\n if you're not sure.\nif adjoint_a == true:\n A should be sorted in order of increasing dimension 1 (i.e., \"column major\"\n order instead of \"row major\" order).", + "inputs": [ + { + "description": "2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.", + "name": "a_indices", + "typeAttr": "Tindices" + }, + { + "description": "1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector.", + "name": "a_values", + "typeAttr": "T" + }, + { + "description": "1-D. The `shape` of the `SparseTensor`, size `[2]` Vector.", + "name": "a_shape", + "type": 9 + }, + { + "description": "2-D. A dense Matrix.", + "name": "b", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "product", + "typeAttr": "T" + } + ], + "summary": "Multiply SparseTensor (of rank 2) \"A\" by dense matrix \"B\"." 
+ } + }, + { + "name": "SparseTensorSliceDataset", + "schema": { + "attributes": [ + { + "name": "Tvalues", + "type": "type" + } + ], + "inputs": [ + { + "name": "indices", + "type": 9 + }, + { + "name": "values", + "typeAttr": "Tvalues" + }, + { + "name": "dense_shape", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that splits a SparseTensor into elements row-wise." + } + }, + { + "name": "SparseTensorToCSRSparseMatrix", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "SparseTensor indices.", + "name": "indices", + "type": 9 + }, + { + "description": "SparseTensor values.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "SparseTensor dense shape.", + "name": "dense_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "A (possibly batched) CSRSparseMatrix.", + "name": "sparse_matrix", + "type": 21 + } + ], + "summary": "Converts a SparseTensor to a (possibly batched) CSRSparseMatrix." + } + }, + { + "name": "SparseToDense", + "schema": { + "attributes": [ + { + "default": true, + "description": "If true, indices are checked to make sure they are sorted in\nlexicographic order and that there are no repeats.", + "name": "validate_indices", + "type": "boolean" + }, + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "Builds an array `dense` with shape `output_shape` such that\n\n```\n# If sparse_indices is scalar\ndense[i] = (i == sparse_indices ? 
sparse_values : default_value)\n\n# If sparse_indices is a vector, then for each i\ndense[sparse_indices[i]] = sparse_values[i]\n\n# If sparse_indices is an n by d matrix, then for each i in [0, n)\ndense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]\n```\n\nAll other values in `dense` are set to `default_value`. If `sparse_values` is a\nscalar, all sparse indices are set to this single value.\n\nIndices should be sorted in lexicographic order, and indices must not\ncontain any repeats. If `validate_indices` is true, these properties\nare checked during execution.", + "inputs": [ + { + "description": "0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete\nindex where `sparse_values[i]` will be placed.", + "name": "sparse_indices", + "typeAttr": "Tindices" + }, + { + "description": "1-D. Shape of the dense output tensor.", + "name": "output_shape", + "typeAttr": "Tindices" + }, + { + "description": "1-D. Values corresponding to each row of `sparse_indices`,\nor a scalar value to be used for all sparse indices.", + "name": "sparse_values", + "typeAttr": "T" + }, + { + "description": "Scalar value to set for indices not specified in\n`sparse_indices`.", + "name": "default_value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Dense output tensor of shape `output_shape`.", + "name": "dense", + "typeAttr": "T" + } + ], + "summary": "Converts a sparse representation into a dense tensor." 
+ } + }, + { + "name": "SparseToSparseSetOperation", + "schema": { + "attributes": [ + { + "name": "set_operation", + "type": "string" + }, + { + "default": true, + "name": "validate_indices", + "type": "boolean" + }, + { + "description": "Must be one of the following: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.", + "name": "T", + "type": "type" + } + ], + "description": "See SetOperationOp::SetOperationFromContext for values of `set_operation`.\n\nIf `validate_indices` is `True`, `SparseToSparseSetOperation` validates the\norder and range of `set1` and `set2` indices.\n\nInput `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,\nand `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same\nas `set2`. Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nInput `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,\nand `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same\nas `set1`. Dimension `n` contains values in a set, duplicates are allowed but\nignored.\n\nIf `validate_indices` is `True`, this op validates the order and range of `set1`\nand `set2` indices.\n\nOutput `result` is a `SparseTensor` represented by `result_indices`,\n`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this\nhas rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`\ndimension contains the result of `set_operation` applied to the corresponding\n`[0...n-1]` dimension of `set`.", + "inputs": [ + { + "description": "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder.", + "name": "set1_indices", + "type": 9 + }, + { + "description": "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder.", + "name": "set1_values", + "typeAttr": "T" + }, + { + "description": "1D `Tensor`, shape of a `SparseTensor`. 
`set1_shape[0...n-1]` must\nbe the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the\nmax set size across `0...n-1` dimensions.", + "name": "set1_shape", + "type": 9 + }, + { + "description": "2D `Tensor`, indices of a `SparseTensor`. Must be in row-major\norder.", + "name": "set2_indices", + "type": 9 + }, + { + "description": "1D `Tensor`, values of a `SparseTensor`. Must be in row-major\norder.", + "name": "set2_values", + "typeAttr": "T" + }, + { + "description": "1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must\nbe the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the\nmax set size across `0...n-1` dimensions.", + "name": "set2_shape", + "type": 9 + } + ], + "outputs": [ + { + "description": "2D indices of a `SparseTensor`.", + "name": "result_indices", + "type": 9 + }, + { + "description": "1D values of a `SparseTensor`.", + "name": "result_values", + "typeAttr": "T" + }, + { + "description": "1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is\nthe same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`\nis the max result set size across all `0...n-1` dimensions.", + "name": "result_shape", + "type": 9 + } + ], + "summary": "Applies set operation along last dimension of 2 `SparseTensor` inputs." + } + }, + { + "name": "Spence", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ] + } + }, + { + "name": "Split", + "schema": { + "attributes": [ + { + "description": "The number of ways to split. Must evenly divide\n`value.shape[split_dim]`.", + "minimum": 1, + "name": "num_split", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "category": "Tensor", + "inputs": [ + { + "description": "0-D. The dimension along which to split. 
Must be in the range\n`[-rank(value), rank(value))`.", + "name": "split_dim", + "type": 3 + }, + { + "description": "The tensor to split.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "They are identically shaped tensors, whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`values.shape[split_dim] / num_split`.", + "name": "output", + "numberAttr": "num_split", + "typeAttr": "T" + } + ], + "summary": "Splits a tensor into `num_split` tensors along one dimension." + } + }, + { + "name": "SplitV", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "num_split", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tlen", + "type": "type" + } + ], + "inputs": [ + { + "description": "The tensor to split.", + "name": "value", + "typeAttr": "T" + }, + { + "description": "list containing the sizes of each output tensor along the split\ndimension. Must sum to the dimension of value along split_dim.\nCan contain one -1 indicating that dimension is to be inferred.", + "name": "size_splits", + "typeAttr": "Tlen" + }, + { + "description": "0-D. The dimension along which to split. Must be in the range\n`[-rank(value), rank(value))`.", + "name": "split_dim", + "type": 3 + } + ], + "outputs": [ + { + "description": "Tensors whose shape matches that of `value`\nexcept along `split_dim`, where their sizes are\n`size_splits[i]`.", + "name": "output", + "numberAttr": "num_split", + "typeAttr": "T" + } + ], + "summary": "Splits a tensor into `num_split` tensors along one dimension." + } + }, + { + "name": "SqlDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": "The database type. 
Currently, the only supported type is 'sqlite'.", + "name": "driver_name", + "type": 7 + }, + { + "description": "A connection string to connect to the database.", + "name": "data_source_name", + "type": 7 + }, + { + "description": "A SQL query to execute.", + "name": "query", + "type": 7 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that executes a SQL query and emits rows of the result set." + } + }, + { + "name": "Sqrt", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = \\sqrt{x} = x^{1/2}\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes square root of x element-wise." + } + }, + { + "name": "SqrtGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`\nis the corresponding input gradient.", + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient for the sqrt of `x` wrt its input." + } + }, + { + "name": "Square", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "I.e., \\\\(y = x * x = x^2\\\\).", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes square of x element-wise." 
+ } + }, + { + "name": "SquaredDifference", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns (x - y)(x - y) element-wise." + } + }, + { + "name": "Squeeze", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": [], + "description": "If specified, only squeezes the dimensions listed. The dimension\nindex starts at 0. It is an error to squeeze a dimension that is not 1. Must\nbe in the range `[-rank(input), rank(input))`.", + "minimum": 0, + "name": "squeeze_dims", + "type": "int64[]" + } + ], + "category": "Shape", + "description": "Given a tensor `input`, this operation returns a tensor of the same type with\nall dimensions of size 1 removed. If you don't want to remove all size 1\ndimensions, you can remove specific size 1 dimensions by specifying\n`squeeze_dims`.\n\nFor example:\n\n```\n# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\nshape(squeeze(t)) ==> [2, 3]\n```\n\nOr, to remove specific size 1 dimensions:\n\n```\n# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]\nshape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]\n```", + "inputs": [ + { + "description": "The `input` to squeeze.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Contains the same data as `input`, but has one or more dimensions of\nsize 1 removed.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Removes dimensions of size 1 from the shape of a tensor." 
+ } + }, + { + "name": "Stack", + "schema": { + "attributes": [ + { + "name": "elem_type", + "type": "type" + }, + { + "default": "", + "name": "stack_name", + "type": "string" + } + ], + "outputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "Deprecated, use StackV2." + } + }, + { + "name": "StackClose", + "schema": { + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "summary": "Deprecated, use StackCloseV2." + } + }, + { + "name": "StackCloseV2", + "schema": { + "inputs": [ + { + "description": "The handle to a stack.", + "name": "handle", + "type": 20 + } + ], + "summary": "Delete the stack from its resource container." + } + }, + { + "name": "StackPop", + "schema": { + "attributes": [ + { + "name": "elem_type", + "type": "type" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + } + ], + "outputs": [ + { + "name": "elem", + "typeAttr": "elem_type" + } + ], + "summary": "Deprecated, use StackPopV2." + } + }, + { + "name": "StackPopV2", + "schema": { + "attributes": [ + { + "description": "The type of the elem that is popped.", + "name": "elem_type", + "type": "type" + } + ], + "inputs": [ + { + "description": "The handle to a stack.", + "name": "handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "The tensor that is popped from the top of the stack.", + "name": "elem", + "typeAttr": "elem_type" + } + ], + "summary": "Pop the element at the top of the stack." + } + }, + { + "name": "StackPush", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": false, + "name": "swap_memory", + "type": "boolean" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "elem", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Deprecated, use StackPushV2." 
+ } + }, + { + "name": "StackPushV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": false, + "description": "Swap `elem` to CPU. Default to false.", + "name": "swap_memory", + "type": "boolean" + } + ], + "inputs": [ + { + "description": "The handle to a stack.", + "name": "handle", + "type": 20 + }, + { + "description": "The tensor to be pushed onto the stack.", + "name": "elem", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The same tensor as the input 'elem'.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Push an element onto the stack." + } + }, + { + "name": "StackV2", + "schema": { + "attributes": [ + { + "description": "The type of the elements on the stack.", + "name": "elem_type", + "type": "type" + }, + { + "default": "", + "description": "Overrides the name used for the temporary stack resource. Default\nvalue is the name of the 'Stack' op (which is guaranteed unique).", + "name": "stack_name", + "type": "string" + } + ], + "inputs": [ + { + "description": "The maximum size of the stack if non-negative. If negative, the stack\nsize is unlimited.", + "name": "max_size", + "type": 3 + } + ], + "outputs": [ + { + "description": "The handle to the stack.", + "name": "handle", + "type": 20 + } + ], + "summary": "A stack that produces elements in first-in last-out order." + } + }, + { + "name": "Stage", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Maximum number of elements in the Staging Area. 
If > 0, inserts\non the container will block when the capacity is reached.", + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "description": "The maximum number of bytes allowed for Tensors in the Staging Area.\nIf > 0, inserts will block until sufficient space is available.", + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "description": "If non-empty, this queue is placed in the given container. Otherwise,\na default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "It is necessary to match this name to the matching Unstage Op.", + "name": "shared_name", + "type": "string" + } + ], + "description": "The basic functionality of this Op is similar to a queue with many\nfewer capabilities and options. This Op is optimized for performance.", + "inputs": [ + { + "description": "a list of tensors\ndtypes A list of data types that inserted values should adhere to.", + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Stage values similar to a lightweight Enqueue." + } + }, + { + "name": "StageClear", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "summary": "Op removes all elements in the underlying container." 
+ } + }, + { + "name": "StagePeek", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "underlying container does not contain sufficient elements\nthis op will block until it does. This Op is optimized for\nperformance.", + "inputs": [ + { + "name": "index", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op peeks at the values at the specified index. If the" + } + }, + { + "name": "StageSize", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ], + "summary": "Op returns the number of elements in the underlying container." + } + }, + { + "name": "StatefulPartitionedCall", + "schema": { + "attributes": [ + { + "description": "A list of input types.", + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "description": "A list of output types.", + "minimum": 0, + "name": "Tout", + "type": "type[]" + }, + { + "description": " A function that takes 'args', a list of tensors, and returns 'output',\n another list of tensors. Input and output types are specified by 'Tin'\n and 'Tout'. The function body of f will be placed and partitioned across\n devices, setting this op apart from the regular Call op. 
This op is\n stateful.", + "name": "f", + "type": "function" + }, + { + "default": "", + "name": "config", + "type": "string" + }, + { + "default": "", + "name": "config_proto", + "type": "string" + }, + { + "default": "", + "name": "executor_type", + "type": "string" + } + ], + "inputs": [ + { + "description": "A list of input tensors.", + "name": "args", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "A list of return values.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "returns `f(inputs)`, where `f`'s body is placed and partitioned." + } + }, + { + "name": "StatefulRandomBinomial", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "S", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 2 + }, + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "resource", + "type": 20 + }, + { + "name": "algorithm", + "type": 9 + }, + { + "name": "shape", + "typeAttr": "S" + }, + { + "name": "counts", + "typeAttr": "T" + }, + { + "name": "probs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "dtype" + } + ] + } + }, + { + "name": "StatefulStandardNormal", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "name": "shape_dtype", + "type": "type" + } + ], + "description": "The generated values will have mean 0 and standard deviation 1.", + "inputs": [ + { + "description": "The handle of the resource variable that stores the state of the 
RNG.", + "name": "resource", + "type": 20 + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "description": "A tensor of the specified shape filled with random normal values.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2'" + } + }, + { + "name": "StatefulStandardNormalV2", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "name": "shape_dtype", + "type": "type" + } + ], + "description": "The generated values will have mean 0 and standard deviation 1.", + "inputs": [ + { + "description": "The handle of the resource variable that stores the state of the RNG.", + "name": "resource", + "type": 20 + }, + { + "description": "The RNG algorithm.", + "name": "algorithm", + "type": 9 + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "description": "A tensor of the specified shape filled with random normal values.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a normal distribution." 
+ } + }, + { + "name": "StatefulTruncatedNormal", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "name": "shape_dtype", + "type": "type" + } + ], + "description": "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.", + "inputs": [ + { + "description": "The handle of the resource variable that stores the state of the RNG.", + "name": "resource", + "type": 20 + }, + { + "description": "The RNG algorithm.", + "name": "algorithm", + "type": 9 + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a truncated normal distribution." + } + }, + { + "name": "StatefulUniform", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "name": "shape_dtype", + "type": "type" + } + ], + "description": "The generated values follow a uniform distribution in the range `[0, 1)`. 
The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.", + "inputs": [ + { + "description": "The handle of the resource variable that stores the state of the RNG.", + "name": "resource", + "type": 20 + }, + { + "description": "The RNG algorithm.", + "name": "algorithm", + "type": 9 + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a uniform distribution." + } + }, + { + "name": "StatefulUniformFullInt", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 23 + }, + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "name": "shape_dtype", + "type": "type" + } + ], + "description": "The generated values are uniform integers covering the whole range of `dtype`.", + "inputs": [ + { + "description": "The handle of the resource variable that stores the state of the RNG.", + "name": "resource", + "type": 20 + }, + { + "description": "The RNG algorithm.", + "name": "algorithm", + "type": 9 + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "shape_dtype" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random integers from a uniform distribution." 
+ } + }, + { + "name": "StatefulUniformInt", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 9 + }, + "description": "The type of the output.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "name": "shape_dtype", + "type": "type" + } + ], + "description": "The generated values are uniform integers in the range `[minval, maxval)`.\nThe lower bound `minval` is included in the range, while the upper bound\n`maxval` is excluded.\n\nThe random integers are slightly biased unless `maxval - minval` is an exact\npower of two. The bias is small for values of `maxval - minval` significantly\nsmaller than the range of the output (either `2^32` or `2^64`).", + "inputs": [ + { + "description": "The handle of the resource variable that stores the state of the RNG.", + "name": "resource", + "type": 20 + }, + { + "description": "The RNG algorithm.", + "name": "algorithm", + "type": 9 + }, + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "shape_dtype" + }, + { + "description": "Minimum value (inclusive, scalar).", + "name": "minval", + "typeAttr": "dtype" + }, + { + "description": "Maximum value (exclusive, scalar).", + "name": "maxval", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random integers from a uniform distribution." 
+ } + }, + { + "name": "StatelessIf", + "schema": { + "attributes": [ + { + "name": "Tcond", + "type": "type" + }, + { + "description": "A list of input types.", + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "description": "A list of output types.", + "minimum": 0, + "name": "Tout", + "type": "type[]" + }, + { + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.", + "name": "then_branch", + "type": "function" + }, + { + "description": " A function that takes 'inputs' and returns a list of tensors, whose\n types are the same as what then_branch returns.", + "name": "else_branch", + "type": "function" + }, + { + "default": [], + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "description": " A Tensor. If the tensor is a scalar of non-boolean type, the\n scalar is converted to a boolean according to the\n following rule: if the scalar is a numerical value, non-zero means\n `True` and zero means False; if the scalar is a string, non-empty\n means `True` and empty means `False`. If the tensor is not a scalar,\n being empty means False and being non-empty means True.\n\n This should only be used when the if then/else body functions do not\n have stateful ops.", + "name": "cond", + "typeAttr": "Tcond" + }, + { + "description": "A list of input tensors.", + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "A list of return values.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "output = cond ? 
then_branch(input) : else_branch(input)" + } + }, + { + "name": "StatelessMultinomial", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "output_dtype", + "type": "type" + } + ], + "inputs": [ + { + "description": "2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`\nrepresents the unnormalized log probabilities for all classes.", + "name": "logits", + "typeAttr": "T" + }, + { + "description": "0-D. Number of independent samples to draw for each row slice.", + "name": "num_samples", + "type": 3 + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "description": "2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`\ncontains the drawn class labels with range `[0, num_classes)`.", + "name": "output", + "typeAttr": "output_dtype" + } + ], + "summary": "Draws samples from a multinomial distribution." 
+ } + }, + { + "name": "StatelessRandomBinomial", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "S", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 2 + }, + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "The type of the output. Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "dtype", + "type": "type" + } + ], + "description": "Outputs random values from a binomial distribution.\n\nThe outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "S" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + }, + { + "description": "The counts of the binomial distribution. Must be broadcastable with `probs`,\nand broadcastable with the rightmost dimensions of `shape`.", + "name": "counts", + "typeAttr": "T" + }, + { + "description": "The probability of success for the binomial distribution. Must be broadcastable\nwith `counts` and broadcastable with the rightmost dimensions of `shape`.", + "name": "probs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom random numbers from a binomial distribution." + } + }, + { + "name": "StatelessRandomGammaV2", + "schema": { + "attributes": [ + { + "description": "The type of the output. 
Must be one of the following: `float16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + } + ], + "description": "Outputs random values from a gamma distribution.\n\nThe outputs are a deterministic function of `shape`, `seed`, and `alpha`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + }, + { + "description": "The concentration of the gamma distribution. Shape must match the rightmost\ndimensions of `shape`.", + "name": "alpha", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom random numbers from a gamma distribution." + } + }, + { + "name": "StatelessRandomNormal", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the output. 
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + } + ], + "description": "The generated values will have mean 0 and standard deviation 1.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom values from a normal distribution." + } + }, + { + "name": "StatelessRandomPoisson", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "Rtype", + "type": "type" + }, + { + "description": "The type of the output. 
Must be one of the following: `float16`, `float32`, `float64`, `int32`, `int64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + } + ], + "description": "Outputs random values from a Poisson distribution.\n\nThe outputs are a deterministic function of `shape`, `seed`, and `lam`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + }, + { + "description": "The rate of the Poisson distribution. Shape must match the rightmost dimensions\nof `shape`.", + "name": "lam", + "typeAttr": "Rtype" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom random numbers from a Poisson distribution." + } + }, + { + "name": "StatelessRandomUniform", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + } + ], + "description": "The generated values follow a uniform distribution in the range `[0, 1)`. 
The\nlower bound 0 is included in the range, while the upper bound 1 is excluded.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom random values from a uniform distribution." + } + }, + { + "name": "StatelessRandomUniformFullInt", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 23 + }, + "description": "The type of the output. Must be one of the following: `int32`, `int64`, `uint32`, `uint64`.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`, `uint32`, `uint64`.", + "name": "Tseed", + "type": "type" + } + ], + "description": "The generated values are uniform integers covering the whole range of `dtype`.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom random integers from a uniform distribution." + } + }, + { + "name": "StatelessRandomUniformInt", + "schema": { + "attributes": [ + { + "description": "The type of the output. 
Must be one of the following: `int32`, `int64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + } + ], + "description": "The generated values follow a uniform distribution in the range `[minval, maxval)`.\n\nThe outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + }, + { + "description": "Minimum value (inclusive, scalar).", + "name": "minval", + "typeAttr": "dtype" + }, + { + "description": "Maximum value (exclusive, scalar).", + "name": "maxval", + "typeAttr": "dtype" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom random integers from a uniform distribution." + } + }, + { + "name": "StatelessTruncatedNormal", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The type of the output. 
Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tseed", + "type": "type" + } + ], + "description": "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.\n\nThe outputs are a deterministic function of `shape` and `seed`.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + }, + { + "description": "2 seeds (shape [2]).", + "name": "seed", + "typeAttr": "Tseed" + } + ], + "outputs": [ + { + "description": "Random values with specified shape.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs deterministic pseudorandom values from a truncated normal distribution." + } + }, + { + "name": "StatelessWhile", + "schema": { + "attributes": [ + { + "description": "dtype in use.", + "minimum": 0, + "name": "T", + "type": "type[]" + }, + { + "description": " A function takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. If the\n tensor is not a scalar, non-emptiness means True and False\n otherwise.\n\n This should only be used when the while condition and body functions\n do not have stateful ops.", + "name": "cond", + "type": "function" + }, + { + "description": " A function that takes a list of tensors and returns another\n list of tensors. 
Both lists have the same types as specified\n by T.", + "name": "body", + "type": "function" + }, + { + "default": [], + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": 10, + "name": "parallel_iterations", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A list of input tensors whose types are T.", + "name": "input", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "description": "A list of output tensors whose types are T.", + "name": "output", + "typeListAttr": "T" + } + ], + "summary": "output = input; While (Cond(output)) { output = Body(output) }" + } + }, + { + "name": "StaticRegexFullMatch", + "schema": { + "attributes": [ + { + "description": "The regular expression to match the input.", + "name": "pattern", + "type": "string" + } + ], + "description": "The input is a string tensor of any shape. The pattern is the\nregular expression to be matched with every element of the input tensor.\nThe boolean values (True or False) of the output tensor indicate\nif the input matches the regex pattern provided.\n\nThe pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)", + "inputs": [ + { + "description": "A string tensor of the text to be processed.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "A bool tensor with the same shape as `input`.", + "name": "output", + "type": 10 + } + ], + "summary": "Check if the input matches the regex pattern." 
+ } + }, + { + "name": "StaticRegexReplace", + "schema": { + "attributes": [ + { + "description": "The regular expression to match the input.", + "name": "pattern", + "type": "string" + }, + { + "description": "The rewrite to be applied to the matched expression.", + "name": "rewrite", + "type": "string" + }, + { + "default": true, + "description": "If True, the replacement is global, otherwise the replacement\nis done only on the first match.", + "name": "replace_global", + "type": "boolean" + } + ], + "description": "It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)", + "inputs": [ + { + "description": "The text to be processed.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "The text after applying pattern and rewrite.", + "name": "output", + "type": 7 + } + ], + "summary": "Replaces the match of pattern in input with rewrite." + } + }, + { + "name": "StatsAggregatorHandle", + "schema": { + "attributes": [ + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ], + "summary": "Creates a statistics manager resource." + } + }, + { + "name": "StatsAggregatorHandleV2", + "schema": { + "attributes": [ + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "handle", + "type": 20 + } + ] + } + }, + { + "name": "StatsAggregatorSetSummaryWriter", + "schema": { + "inputs": [ + { + "name": "stats_aggregator", + "type": 20 + }, + { + "name": "summary", + "type": 20 + } + ], + "summary": "Set a summary_writer_interface to record statistics using given stats_aggregator." 
+ } + }, + { + "name": "StatsAggregatorSummary", + "schema": { + "inputs": [ + { + "name": "iterator", + "type": 20 + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ], + "summary": "Produces a summary of any statistics recorded by the given statistics manager." + } + }, + { + "name": "StopGradient", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "When executed in a graph, this op outputs its input tensor as-is.\n\nWhen building ops to compute gradients, this op prevents the contribution of\nits inputs to be taken into account. Normally, the gradient generator adds ops\nto a graph to compute the derivatives of a specified 'loss' by recursively\nfinding out inputs that contributed to its computation. If you insert this op\nin the graph it inputs are masked from the gradient generator. They are not\ntaken into account for computing gradients.\n\nThis is useful any time you want to compute a value with TensorFlow but need\nto pretend that the value was a constant. Some examples include:\n\n* The *EM* algorithm where the *M-step* should not involve backpropagation\n through the output of the *E-step*.\n* Contrastive divergence training of Boltzmann machines where, when\n differentiating the energy function, the training must not backpropagate\n through the graph that generated the samples from the model.\n* Adversarial training, where no backprop should happen through the adversarial\n example generation process.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Stops gradient computation." 
+ } + }, + { + "name": "StridedSlice", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Index", + "type": "type" + }, + { + "default": 0, + "description": "a bitmask where a bit i being 1 means to ignore the begin\nvalue and instead use the largest interval possible. At runtime\nbegin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or\n`[-1, n-1]` if `stride[i] < 0`", + "name": "begin_mask", + "type": "int64" + }, + { + "default": 0, + "description": "analogous to `begin_mask`", + "name": "end_mask", + "type": "int64" + }, + { + "default": 0, + "description": "a bitmask where bit `i` being 1 means the `i`th\nposition is actually an ellipsis. One bit at most can be 1.\nIf `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`\nis provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis\nimplicitly creates as many range specifications as necessary to fully\nspecify the sliced range for every dimension. For example for a 4-dimensional\ntensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.", + "name": "ellipsis_mask", + "type": "int64" + }, + { + "default": 0, + "description": "a bitmask where bit `i` being 1 means the `i`th\nspecification creates a new shape 1 dimension. For example\n`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.", + "name": "new_axis_mask", + "type": "int64" + }, + { + "default": 0, + "description": "a bitmask where bit `i` implies that the `i`th\nspecification should shrink the dimensionality. begin and end\nmust imply a slice of size 1 in the dimension. 
For example in\npython one might do `foo[:, 3, :]` which would result in\n`shrink_axis_mask` being 2.", + "name": "shrink_axis_mask", + "type": "int64" + } + ], + "category": "Tensor", + "description": "Note, most python users will want to use the Python `Tensor.__getitem__`\nor `Variable.__getitem__` rather than this op directly.\n\nThe goal of this op is to produce a new tensor with a subset of\nthe elements from the `n` dimensional `input` tensor. The subset is chosen using\na sequence of `m` sparse range specifications encoded into the arguments\nof this function. Note, in some cases\n`m` could be equal to `n`, but this need not be the case. Each\nrange specification entry can be one of the following:\n\n- An ellipsis (...). Ellipses are used to imply zero or more\n dimensions of full-dimension selection and are produced using\n `ellipsis_mask`. For example, `foo[...]` is the identity slice.\n\n- A new axis. This is used to insert a new shape=1 dimension and is\n produced using `new_axis_mask`. For example, `foo[:, ...]` where\n `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.\n\n\n- A range `begin:end:stride`. This is used to specify how much to choose from\n a given dimension. `stride` can be any integer but 0. `begin` is an integer\n which represents the index of the first value to select while `end` represents\n the index of the last value to select. The number of values selected in each\n dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.\n `begin` and `end` can be negative where `-1` is the last element, `-2` is\n the second to last. `begin_mask` controls whether to replace the explicitly\n given `begin` with an implicit effective value of `0` if `stride > 0` and\n `-1` if `stride < 0`. `end_mask` is analogous but produces the number\n required to create the largest open interval. For example, given a shape\n `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. 
Do\n not assume this is equivalent to `foo[0:-1]` which has an effective `begin`\n and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the\n first dimension of a tensor while dropping the last two (in the original\n order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.\n\n- A single index. This is used to keep only elements that have a given\n index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a\n shape `(6,)` tensor. This is encoded in `begin` and `end` and\n `shrink_axis_mask`.\n\nEach conceptual range specification is encoded in the op's argument. This\nencoding is best understand by considering a non-trivial example. In\nparticular,\n`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as\n\n```\nbegin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)\nend = [2, 4, x, x, -3, x]\nstrides = [1, 1, x, x, -1, 1]\nbegin_mask = 1<<4 | 1 << 5 = 48\nend_mask = 1<<5 = 32\nellipsis_mask = 1<<3 = 8\nnew_axis_mask = 1<<2 4\nshrink_axis_mask = 1<<0\n```\n\nIn this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of\nthe slice becomes (2, 1, 5, 5, 2, 5).\nLet us walk step by step through each argument specification.\n\n1. The first argument in the example slice is turned into `begin = 1` and\n`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we\nalso set the appropriate bit in `shrink_axis_mask`.\n\n2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have\nzero bits contributed.\n\n3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1\ndimension in the final shape. Dummy values are contributed to begin,\nend and stride, while the new_axis_mask bit is set.\n\n4. `...` grab the full ranges from as many dimensions as needed to\nfully specify a slice for every dimension of the input shape.\n\n5. `:-3:-1` shows the use of negative indices. A negative index `i` associated\nwith a dimension that has shape `s` is converted to a positive index\n`s + i`. 
So `-1` becomes `s-1` (i.e. the last element). This conversion\nis done internally so begin, end and strides receive x, -3, and -1.\nThe appropriate begin_mask bit is set to indicate the start range is the\nfull range (ignoring the x).\n\n6. `:` indicates that the entire contents of the corresponding dimension\nis selected. This is equivalent to `::` or `0::1`. begin, end, and strides\nreceive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and\n`end_mask` are also set.\n\n*Requirements*:\n `0 != strides[i] for i in [0, m)`\n `ellipsis_mask must be a power of two (only one ellipsis)`", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "description": "`begin[k]` specifies the offset into the `k`th range specification.\nThe exact dimension this corresponds to will be determined by context.\nOut-of-bounds values will be silently clamped. If the `k`th bit of\n`begin_mask` then `begin[k]` is ignored and the full range of the\nappropriate dimension is used instead. Negative values causes indexing\nto start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.", + "name": "begin", + "typeAttr": "Index" + }, + { + "description": "`end[i]` is like `begin` with the exception that `end_mask` is\nused to determine full ranges.", + "name": "end", + "typeAttr": "Index" + }, + { + "description": "`strides[i]` specifies the increment in the `i`th specification\nafter extracting a given element. Negative indices will reverse\nthe original order. Out or range values are\nclamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`", + "name": "strides", + "typeAttr": "Index" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Return a strided slice from `input`." 
+ } + }, + { + "name": "StridedSliceAssign", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Index", + "type": "type" + }, + { + "default": 0, + "name": "begin_mask", + "type": "int64" + }, + { + "default": 0, + "name": "end_mask", + "type": "int64" + }, + { + "default": 0, + "name": "ellipsis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "new_axis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "shrink_axis_mask", + "type": "int64" + } + ], + "description": "The values of `value` are assigned to the positions in the variable\n`ref` that are selected by the slice parameters. The slice parameters\n`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`'s\nshape must be exactly the shape produced by the slice of `ref`.", + "inputs": [ + { + "isRef": true, + "name": "ref", + "typeAttr": "T" + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "isRef": true, + "name": "output_ref", + "typeAttr": "T" + } + ], + "summary": "Assign `value` to the sliced l-value reference of `ref`." 
+ } + }, + { + "name": "StridedSliceGrad", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Index", + "type": "type" + }, + { + "default": 0, + "name": "begin_mask", + "type": "int64" + }, + { + "default": 0, + "name": "end_mask", + "type": "int64" + }, + { + "default": 0, + "name": "ellipsis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "new_axis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "shrink_axis_mask", + "type": "int64" + } + ], + "description": "Since `StridedSlice` cuts out pieces of its `input` which is size\n`shape`, its gradient will have the same shape (which is passed here\nas `shape`). The gradient will be zero in any element that the slice\ndoes not select.\n\nArguments are the same as StridedSliceGrad with the exception that\n`dy` is the input gradient to be propagated and `shape` is the\nshape of `StridedSlice`'s `input`.", + "inputs": [ + { + "name": "shape", + "typeAttr": "Index" + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns the gradient of `StridedSlice`." 
+ } + }, + { + "name": "StringFormat", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "T", + "type": "type[]" + }, + { + "default": "%s", + "description": "A string, the template to format tensor summaries into.", + "name": "template", + "type": "string" + }, + { + "default": "%s", + "description": "A string, at each placeholder in the template a subsequent tensor summary will be inserted.", + "name": "placeholder", + "type": "string" + }, + { + "default": 3, + "description": "When formatting the tensor summaries print the first and last summarize entries of each tensor dimension.", + "name": "summarize", + "type": "int64" + } + ], + "description": "Formats a string template using a list of tensors, pretty-printing tensor summaries.", + "inputs": [ + { + "description": "The list of tensors to format into the placeholder string.", + "name": "inputs", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "description": "= The resulting string scalar.", + "name": "output", + "type": 7 + } + ], + "summary": "Formats a string template using a list of tensors." + } + }, + { + "name": "StringJoin", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "default": "", + "description": "string, an optional join separator.", + "name": "separator", + "type": "string" + } + ], + "description": "with the given separator (default is an empty separator).\n\nExamples:\n\n>>> s = [\"hello\", \"world\", \"tensorflow\"]\n>>> tf.strings.join(s, \" \")\n", + "inputs": [ + { + "description": "A list of string tensors. The tensors must all have the same shape,\nor be scalars. 
Scalars may be mixed in; these will be broadcast to the shape\nof non-scalar inputs.", + "name": "inputs", + "numberAttr": "N", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ], + "summary": "Joins the strings in the given list of string tensors into one tensor;" + } + }, + { + "name": "StringLength", + "schema": { + "attributes": [ + { + "default": "BYTE", + "description": "The unit that is counted to compute string length. One of: `\"BYTE\"` (for\nthe number of bytes in each string) or `\"UTF8_CHAR\"` (for the number of UTF-8\nencoded Unicode code points in each string). Results are undefined\nif `unit=UTF8_CHAR` and the `input` strings do not contain structurally\nvalid UTF-8. Must be one of the following: `BYTE`, `UTF8_CHAR`.", + "name": "unit", + "type": "string" + } + ], + "description": "Computes the length of each string given in the input tensor.\n\n>>> strings = tf.constant(['Hello','TensorFlow', '\\U0001F642'])\n>>> tf.strings.length(strings).numpy() # default counts bytes\narray([ 5, 10, 4], dtype=int32)\n>>> tf.strings.length(strings, unit=\"UTF8_CHAR\").numpy()\narray([ 5, 10, 1], dtype=int32)\n", + "inputs": [ + { + "description": "The strings for which to compute the length for each element.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "Integer tensor that has the same shape as `input`. The output contains the\nelement-wise string lengths of `input`.", + "name": "output", + "type": 3 + } + ], + "summary": "String lengths of `input`." + } + }, + { + "name": "StringLower", + "schema": { + "attributes": [ + { + "default": "", + "name": "encoding", + "type": "string" + } + ], + "description": "Example:\n\n>>> tf.strings.lower(\"CamelCase string and ALL CAPS\")\n\n", + "inputs": [ + { + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ], + "summary": "Converts all uppercase characters into their respective lowercase replacements." 
+ } + }, + { + "name": "StringNGrams", + "schema": { + "attributes": [ + { + "description": "The string to append between elements of the token. Use \"\" for no separator.", + "name": "separator", + "type": "string" + }, + { + "description": "The sizes of the ngrams to create.", + "minimum": 0, + "name": "ngram_widths", + "type": "int64[]" + }, + { + "description": "The string to use to pad the left side of the ngram sequence. Only used if\npad_width != 0.", + "name": "left_pad", + "type": "string" + }, + { + "description": "The string to use to pad the right side of the ngram sequence. Only used if\npad_width != 0.", + "name": "right_pad", + "type": "string" + }, + { + "description": "The number of padding elements to add to each side of each\nsequence. Note that padding will never be greater than 'ngram_widths'-1\nregardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`\nelements.", + "name": "pad_width", + "type": "int64" + }, + { + "name": "preserve_short_sequences", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsplits", + "type": "type" + } + ], + "description": "This op accepts a ragged tensor with 1 ragged dimension containing only\nstrings and outputs a ragged tensor with 1 ragged dimension containing ngrams\nof that string, joined along the innermost axis.", + "inputs": [ + { + "description": "The values tensor of the ragged string tensor to make ngrams out of. 
Must be a\n1D string tensor.", + "name": "data", + "type": 7 + }, + { + "description": "The splits tensor of the ragged string tensor to make ngrams out of.", + "name": "data_splits", + "typeAttr": "Tsplits" + } + ], + "outputs": [ + { + "description": "The values tensor of the output ngrams ragged tensor.", + "name": "ngrams", + "type": 7 + }, + { + "description": "The splits tensor of the output ngrams ragged tensor.", + "name": "ngrams_splits", + "typeAttr": "Tsplits" + } + ], + "summary": "Creates ngrams from ragged string data." + } + }, + { + "name": "StringSplit", + "schema": { + "attributes": [ + { + "default": true, + "description": "A `bool`. If `True`, skip the empty strings from the result.", + "name": "skip_empty", + "type": "boolean" + } + ], + "description": "Let N be the size of source (typically N will be the batch size). Split each\nelement of `input` based on `delimiter` and return a `SparseTensor`\ncontaining the splitted tokens. Empty tokens are ignored.\n\n`delimiter` can be empty, or a string of split characters. If `delimiter` is an\n empty string, each element of `input` is split into individual single-byte\n character strings, including splitting of UTF-8 multibyte sequences. Otherwise\n every character of `delimiter` is a potential split point.\n\nFor example:\n N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output\n will be\n\n indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\n shape = [2, 3]\n values = ['hello', 'world', 'a', 'b', 'c']", + "inputs": [ + { + "description": "1-D. Strings to split.", + "name": "input", + "type": 7 + }, + { + "description": "0-D. 
Delimiter characters (bytes), or empty string.", + "name": "delimiter", + "type": 7 + } + ], + "outputs": [ + { + "description": "A dense matrix of int64 representing the indices of the sparse tensor.", + "name": "indices", + "type": 9 + }, + { + "description": "A vector of strings corresponding to the split values.", + "name": "values", + "type": 7 + }, + { + "description": "a length-2 vector of int64 representing the shape of the sparse\ntensor, where the first value is N and the second value is the maximum number\nof tokens in a single input entry.", + "name": "shape", + "type": 9 + } + ], + "summary": "Split elements of `input` based on `delimiter` into a `SparseTensor`." + } + }, + { + "name": "StringSplitV2", + "schema": { + "attributes": [ + { + "default": -1, + "description": "An `int`. If `maxsplit > 0`, limit of the split of the result.", + "name": "maxsplit", + "type": "int64" + } + ], + "description": "Let N be the size of source (typically N will be the batch size). Split each\nelement of `source` based on `sep` and return a `SparseTensor`\ncontaining the split tokens. Empty tokens are ignored.\n\nFor example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',\nthen the output will be\n```\nst.indices = [0, 0;\n 0, 1;\n 1, 0;\n 1, 1;\n 1, 2]\nst.shape = [2, 3]\nst.values = ['hello', 'world', 'a', 'b', 'c']\n```\n\nIf `sep` is given, consecutive delimiters are not grouped together and are\ndeemed to delimit empty strings. For example, source of `\"1<>2<><>3\"` and\nsep of `\"<>\"` returns `[\"1\", \"2\", \"\", \"3\"]`. 
If `sep` is None or an empty\nstring, consecutive whitespace are regarded as a single separator, and the\nresult will contain no empty strings at the start or end if the string has\nleading or trailing whitespace.\n\nNote that the above mentioned behavior matches python's str.split.", + "inputs": [ + { + "description": "`1-D` string `Tensor`, the strings to split.", + "name": "input", + "type": 7 + }, + { + "description": "`0-D` string `Tensor`, the delimiter character.", + "name": "sep", + "type": 7 + } + ], + "outputs": [ + { + "name": "indices", + "type": 9 + }, + { + "name": "values", + "type": 7 + }, + { + "name": "shape", + "type": 9 + } + ], + "summary": "Split elements of `source` based on `sep` into a `SparseTensor`." + } + }, + { + "name": "StringStrip", + "schema": { + "inputs": [ + { + "description": "A string `Tensor` of any shape.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "A string `Tensor` of the same shape as the input.\n\nExamples:\n\n>>> tf.strings.strip([\"\\nTensorFlow\", \" The python library \"]).numpy()\narray([b'TensorFlow', b'The python library'], dtype=object)", + "name": "output", + "type": 7 + } + ], + "summary": "Strip leading and trailing whitespaces from the Tensor." 
+ } + }, + { + "name": "StringToHashBucket", + "schema": { + "attributes": [ + { + "description": "The number of buckets.", + "minimum": 1, + "name": "num_buckets", + "type": "int64" + } + ], + "description": "The hash function is deterministic on the content of the string within the\nprocess.\n\nNote that the hash function may change from time to time.\nThis functionality will be deprecated and it's recommended to use\n`tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.", + "inputs": [ + { + "name": "string_tensor", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor of the same shape as the input `string_tensor`.", + "name": "output", + "type": 9 + } + ], + "summary": "Converts each string in the input Tensor to its hash mod by a number of buckets." + } + }, + { + "name": "StringToHashBucketFast", + "schema": { + "attributes": [ + { + "description": "The number of buckets.", + "minimum": 1, + "name": "num_buckets", + "type": "int64" + } + ], + "description": "The hash function is deterministic on the content of the string within the\nprocess and will never change. However, it is not suitable for cryptography.\nThis function may be used when CPU time is scarce and inputs are trusted or\nunimportant. There is a risk of adversaries constructing inputs that all hash\nto the same bucket. To prevent this problem, use a strong hash function with\n`tf.string_to_hash_bucket_strong`.\n\nExamples:\n\n>>> tf.strings.to_hash_bucket_fast([\"Hello\", \"TensorFlow\", \"2.x\"], 3).numpy()\narray([0, 2, 2])", + "inputs": [ + { + "description": "The strings to assign a hash bucket.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor of the same shape as the input `string_tensor`.", + "name": "output", + "type": 9 + } + ], + "summary": "Converts each string in the input Tensor to its hash mod by a number of buckets." 
+ } + }, + { + "name": "StringToHashBucketStrong", + "schema": { + "attributes": [ + { + "description": "The number of buckets.", + "minimum": 1, + "name": "num_buckets", + "type": "int64" + }, + { + "description": "The key used to seed the hash function, passed as a list of two uint64\nelements.", + "name": "key", + "type": "int64[]" + } + ], + "description": "The hash function is deterministic on the content of the string within the\nprocess. The hash function is a keyed hash function, where attribute `key`\ndefines the key of the hash function. `key` is an array of 2 elements.\n\nA strong hash is important when inputs may be malicious, e.g. URLs with\nadditional components. Adversaries could try to make their inputs hash to the\nsame bucket for a denial-of-service attack or to skew the results. A strong\nhash can be used to make it difficult to find inputs with a skewed hash value\ndistribution over buckets. This requires that the hash function is\nseeded by a high-entropy (random) \"key\" unknown to the adversary.\n\nThe additional robustness comes at a cost of roughly 4x higher compute\ntime than `tf.string_to_hash_bucket_fast`.\n\nExamples:\n\n>>> tf.strings.to_hash_bucket_strong([\"Hello\", \"TF\"], 3, [1, 2]).numpy()\narray([2, 0])", + "inputs": [ + { + "description": "The strings to assign a hash bucket.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor of the same shape as the input `string_tensor`.", + "name": "output", + "type": 9 + } + ], + "summary": "Converts each string in the input Tensor to its hash mod by a number of buckets." + } + }, + { + "name": "StringToNumber", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "The numeric type to interpret each string in `string_tensor` as. 
Must be one of the following: `float32`, `float64`, `int32`, `int64`.", + "name": "out_type", + "type": "type" + } + ], + "description": "(Note that int32 overflow results in an error while float overflow\nresults in a rounded value.)\n\nExample:\n\n>>> strings = [\"5.0\", \"3.0\", \"7.0\"]\n>>> tf.strings.to_number(strings)\n\n", + "inputs": [ + { + "name": "string_tensor", + "type": 7 + } + ], + "outputs": [ + { + "description": "A Tensor of the same shape as the input `string_tensor`.", + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Converts each string in the input Tensor to the specified numeric type." + } + }, + { + "name": "StringUpper", + "schema": { + "attributes": [ + { + "default": "", + "name": "encoding", + "type": "string" + } + ], + "description": "Example:\n\n>>> tf.strings.upper(\"CamelCase string and ALL CAPS\")\n\n", + "inputs": [ + { + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ], + "summary": "Converts all lowercase characters into their respective uppercase replacements." + } + }, + { + "name": "Sub", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `uint32`.", + "name": "T", + "type": "type" + } + ], + "description": "*NOTE*: `Sub` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x - y element-wise." + } + }, + { + "name": "Substr", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + }, + { + "default": "BYTE", + "description": "The unit that is used to create the substring. 
One of: `\"BYTE\"` (for\ndefining position and length by bytes) or `\"UTF8_CHAR\"` (for the UTF-8\nencoded Unicode code points). The default is `\"BYTE\"`. Results are undefined if\n`unit=UTF8_CHAR` and the `input` strings do not contain structurally valid\nUTF-8. Must be one of the following: `BYTE`, `UTF8_CHAR`.", + "name": "unit", + "type": "string" + } + ], + "description": "For each string in the input `Tensor`, creates a substring starting at index\n`pos` with a total length of `len`.\n\nIf `len` defines a substring that would extend beyond the length of the input\nstring, or if `len` is negative, then as many characters as possible are used.\n\nA negative `pos` indicates distance within the string backwards from the end.\n\nIf `pos` specifies an index which is out of range for any of the input strings,\nthen an `InvalidArgumentError` is thrown.\n\n`pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on\nOp creation.\n\n*NOTE*: `Substr` supports broadcasting up to two dimensions. 
More about\nbroadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n\n---\n\nExamples\n\nUsing scalar `pos` and `len`:\n\n```python\ninput = [b'Hello', b'World']\nposition = 1\nlength = 3\n\noutput = [b'ell', b'orl']\n```\n\nUsing `pos` and `len` with same shape as `input`:\n\n```python\ninput = [[b'ten', b'eleven', b'twelve'],\n [b'thirteen', b'fourteen', b'fifteen'],\n [b'sixteen', b'seventeen', b'eighteen']]\nposition = [[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]\nlength = [[2, 3, 4],\n [4, 3, 2],\n [5, 5, 5]]\n\noutput = [[b'en', b'eve', b'lve'],\n [b'hirt', b'urt', b'te'],\n [b'ixtee', b'vente', b'hteen']]\n```\n\nBroadcasting `pos` and `len` onto `input`:\n\n```\ninput = [[b'ten', b'eleven', b'twelve'],\n [b'thirteen', b'fourteen', b'fifteen'],\n [b'sixteen', b'seventeen', b'eighteen'],\n [b'nineteen', b'twenty', b'twentyone']]\nposition = [1, 2, 3]\nlength = [1, 2, 3]\n\noutput = [[b'e', b'ev', b'lve'],\n [b'h', b'ur', b'tee'],\n [b'i', b've', b'hte'],\n [b'i', b'en', b'nty']]\n```\n\nBroadcasting `input` onto `pos` and `len`:\n\n```\ninput = b'thirteen'\nposition = [1, 5, 7]\nlength = [3, 2, 1]\n\noutput = [b'hir', b'ee', b'n']\n```\n\nRaises:\n\n * `ValueError`: If the first argument cannot be converted to a\n Tensor of `dtype string`.\n * `InvalidArgumentError`: If indices are out of range.\n * `ValueError`: If `pos` and `len` are not the same shape.\n", + "inputs": [ + { + "description": "Tensor of strings", + "name": "input", + "type": 7 + }, + { + "description": "Scalar defining the position of first character in each substring", + "name": "pos", + "typeAttr": "T" + }, + { + "description": "Scalar defining the number of characters to include in each substring", + "name": "len", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Tensor of substrings", + "name": "output", + "type": 7 + } + ], + "summary": "Return substrings from `Tensor` of strings." 
+ } + }, + { + "name": "Sum", + "schema": { + "attributes": [ + { + "default": false, + "description": "If true, retain reduced dimensions with length 1.", + "name": "keep_dims", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "Reduces `input` along the dimensions given in `reduction_indices`. Unless\n`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n`reduction_indices`. If `keep_dims` is true, the reduced dimensions are\nretained with length 1.", + "inputs": [ + { + "description": "The tensor to reduce.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "The dimensions to reduce. Must be in the range\n`[-rank(input), rank(input))`.", + "name": "reduction_indices", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "The reduced tensor.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum of elements across dimensions of a tensor." 
+ } + }, + { + "name": "SummaryWriter", + "schema": { + "attributes": [ + { + "default": "", + "name": "shared_name", + "type": "string" + }, + { + "default": "", + "name": "container", + "type": "string" + } + ], + "outputs": [ + { + "name": "writer", + "type": 20 + } + ] + } + }, + { + "name": "Svd", + "schema": { + "attributes": [ + { + "default": true, + "description": "If true, left and right singular vectors will be\ncomputed and returned in `u` and `v`, respectively.\nIf false, `u` and `v` are not set and should never referenced.", + "name": "compute_uv", + "type": "boolean" + }, + { + "default": false, + "description": "If true, compute full-sized `u` and `v`. If false\n(the default), compute only the leading `P` singular vectors.\nIgnored if `compute_uv` is `False`.", + "name": "full_matrices", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `float16`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Computes the SVD of each inner matrix in `input` such that\n`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`\n\n```python\n# a is a tensor containing a batch of matrices.\n# s is a tensor of singular values for each matrix.\n# u is the tensor containing the left singular vectors for each matrix.\n# v is the tensor containing the right singular vectors for each matrix.\ns, u, v = svd(a)\ns, _, _ = svd(a, compute_uv=False)\n```", + "inputs": [ + { + "description": "A tensor of shape `[..., M, N]` whose inner-most 2 dimensions\nform matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Singular values. Shape is `[..., P]`.", + "name": "s", + "typeAttr": "T" + }, + { + "description": "Left singular vectors. If `full_matrices` is `False` then shape is\n`[..., M, P]`; if `full_matrices` is `True` then shape is\n`[..., M, M]`. 
Undefined if `compute_uv` is `False`.", + "name": "u", + "typeAttr": "T" + }, + { + "description": "Right singular vectors. If `full_matrices` is `False` then shape is\n`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.\nUndefined if `compute_uv` is false.", + "name": "v", + "typeAttr": "T" + } + ], + "summary": "Computes the singular value decompositions of one or more matrices." + } + }, + { + "name": "Switch", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,\nthe data goes to `output_false`.\n\nSee also `RefSwitch` and `Merge`.", + "inputs": [ + { + "description": "The tensor to be forwarded to the appropriate output.", + "name": "data", + "typeAttr": "T" + }, + { + "description": "A scalar that specifies which output port will receive data.", + "name": "pred", + "type": 10 + } + ], + "outputs": [ + { + "description": "If `pred` is false, data will be forwarded to this output.", + "name": "output_false", + "typeAttr": "T" + }, + { + "description": "If `pred` is true, data will be forwarded to this output.", + "name": "output_true", + "typeAttr": "T" + } + ], + "summary": "Forwards `data` to the output port determined by `pred`." + } + }, + { + "name": "SymbolicGradient", + "schema": { + "attributes": [ + { + "description": "the type list for the input list.", + "minimum": 1, + "name": "Tin", + "type": "type[]" + }, + { + "description": "the type list for the output list.", + "minimum": 1, + "name": "Tout", + "type": "type[]" + }, + { + "description": "The function we want to compute the gradient for.\n\nThe function 'f' must be a numerical function which takes N inputs and\nproduces M outputs. Its gradient function 'g', which is computed by\nthis SymbolicGradient op is a function taking N + M inputs and\nproduces N outputs.\n\nI.e. 
if we have\n (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),\nthen, g is\n (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,\n dL/dy1, dL/dy2, ..., dL/dy_M),\n\nwhere L is a scalar-value function of (x1, x2, ..., xN) (e.g., the\nloss function). dL/dx_i is the partial derivative of L with respect\nto x_i.\n\n(Needs some math expert to say the comment above better.)", + "name": "f", + "type": "function" + } + ], + "inputs": [ + { + "description": "a list of input tensors of size N + M;", + "name": "input", + "typeListAttr": "Tin" + } + ], + "outputs": [ + { + "description": "a list of output tensors of size N;", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "Computes the gradient function for function f via backpropagation." + } + }, + { + "name": "TFRecordDataset", + "schema": { + "inputs": [ + { + "description": "A scalar or vector containing the name(s) of the file(s) to be\nread.", + "name": "filenames", + "type": 7 + }, + { + "description": "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "name": "compression_type", + "type": 7 + }, + { + "description": "A scalar representing the number of bytes to buffer. A value of\n0 means no buffering will be performed.", + "name": "buffer_size", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that emits the records from one or more TFRecord files." + } + }, + { + "name": "TFRecordReader", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. 
Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + }, + { + "default": "", + "name": "compression_type", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to reference the Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "summary": "A Reader that outputs the records from a TensorFlow Records file." + } + }, + { + "name": "TFRecordReaderV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + }, + { + "default": "", + "name": "compression_type", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to reference the Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "summary": "A Reader that outputs the records from a TensorFlow Records file." + } + }, + { + "name": "TPUCompilationResult", + "schema": { + "description": "This operation returns the result of a TPU compilation as a serialized\nCompilationResultProto, which holds a status and an error message if an error\noccurred during compilation.", + "outputs": [ + { + "name": "output", + "type": 7 + } + ], + "summary": "Returns the result of a TPU compilation." 
+ } + }, + { + "name": "TPUEmbeddingActivations", + "schema": { + "attributes": [ + { + "description": "The id of the table in the embedding layer configuration from which\nthese activations were computed.", + "minimum": 0, + "name": "table_id", + "type": "int64" + }, + { + "description": "Identifier of the set of embedding indices which produced these\nactivations.", + "minimum": 0, + "name": "lookup_id", + "type": "int64" + } + ], + "description": "This op simply returns its first input, which is assumed to have been sliced\nfrom the Tensors returned by TPUEmbeddingDequeueActivations. The presence of\nthis op, and its first argument being a trainable Variable, enables automatic\ndifferentiation of graphs containing embeddings via the TPU Embedding Python\nlibraries.", + "inputs": [ + { + "description": "A trainable variable, enabling optimizers to find this op.", + "name": "embedding_variable", + "type": 1 + }, + { + "description": "The embedding activations Tensor to return.", + "name": "sliced_activations", + "type": 1 + } + ], + "outputs": [ + { + "name": "output", + "type": 1 + } + ], + "summary": "An op enabling differentiation of TPU Embeddings." + } + }, + { + "name": "TPUOrdinalSelector", + "schema": { + "description": "This Op produces a set of TPU cores (for warm-up) or a single TPU core\n(for regular inference) to execute the TPU program on. The output is\nconsumed by TPUPartitionedCall.", + "outputs": [ + { + "description": "A vector of 1 or more TPU cores.", + "name": "device_ordinals", + "type": 3 + } + ], + "summary": "A TPU core selector Op." 
+ } + }, + { + "name": "TPUPartitionedCall", + "schema": { + "attributes": [ + { + "description": "The types of the arguments to the function.", + "minimum": 0, + "name": "Tin", + "type": "type[]" + }, + { + "description": "The types of the outputs of the function.", + "minimum": 0, + "name": "Tout", + "type": "type[]" + }, + { + "description": "The function to call.", + "name": "f", + "type": "function" + }, + { + "default": 0, + "name": "autotuner_thresh", + "type": "int64" + } + ], + "inputs": [ + { + "description": "The arguments to the function.", + "name": "args", + "typeListAttr": "Tin" + }, + { + "description": "The TPU device ordinal to run the function on.", + "name": "device_ordinal", + "type": 3 + } + ], + "outputs": [ + { + "description": "The output of the function call.", + "name": "output", + "typeListAttr": "Tout" + } + ], + "summary": "Calls a function placed on a specified TPU device." + } + }, + { + "name": "TPUReplicateMetadata", + "schema": { + "attributes": [ + { + "description": "Number of replicas of the computation", + "minimum": 0, + "name": "num_replicas", + "type": "int64" + }, + { + "default": 1, + "description": "Number of cores per replica. Used for model parallelism.", + "name": "num_cores_per_replica", + "type": "int64" + }, + { + "default": "", + "description": "TopologyProto indicating the topology of the TPU pod slice.", + "name": "topology", + "type": "string" + }, + { + "default": true, + "description": "Whether to place the computation on the TPU.", + "name": "use_tpu", + "type": "boolean" + }, + { + "default": [], + "description": "The assignment of devices for the computation.", + "name": "device_assignment", + "type": "int64[]" + }, + { + "default": [], + "description": "DEPRECATED. 
Use num_cores_per_replica instead.", + "name": "computation_shape", + "type": "int64[]" + }, + { + "default": [], + "name": "host_compute_core", + "type": "string[]" + }, + { + "default": [], + "name": "padding_map", + "type": "string[]" + }, + { + "default": "STEP_MARK_AT_ENTRY", + "name": "step_marker_location", + "type": "string" + }, + { + "default": false, + "name": "allow_soft_placement", + "type": "boolean" + } + ], + "description": "This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.", + "summary": "Metadata indicating how the TPU computation should be replicated." + } + }, + { + "name": "TPUReplicatedInput", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "N", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": false, + "name": "is_mirrored_variable", + "type": "boolean" + }, + { + "default": -1, + "name": "index", + "type": "int64" + } + ], + "description": "This operation holds a replicated input to a `tpu.replicate()` computation subgraph.\nEach replicated input has the same shape and type alongside the output.\n\nFor example:\n```\n%a = \"tf.opA\"()\n%b = \"tf.opB\"()\n%replicated_input = \"tf.TPUReplicatedInput\"(%a, %b)\n%computation = \"tf.Computation\"(%replicated_input)\n```\nThe above computation has a replicated input of two replicas.", + "inputs": [ + { + "name": "inputs", + "numberAttr": "N", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Connects N inputs to an N-way replicated TPU computation." 
+ } + }, + { + "name": "TPUReplicatedOutput", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "num_replicas", + "type": "int64" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "This operation holds a replicated output from a `tpu.replicate()` computation subgraph.\nEach replicated output has the same shape and type alongside the input.\n\nFor example:\n```\n%computation = \"tf.Computation\"()\n%replicated_output:2 = \"tf.TPUReplicatedOutput\"(%computation)\n```\nThe above computation has a replicated output of two replicas.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "outputs", + "numberAttr": "num_replicas", + "typeAttr": "T" + } + ], + "summary": "Connects N outputs from an N-way replicated TPU computation." + } + }, + { + "name": "TakeDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A scalar representing the number of elements from the `input_dataset`\nthat should be taken. A value of `-1` indicates that all of `input_dataset`\nis taken.", + "name": "count", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that contains `count` elements from the `input_dataset`." 
+ } + }, + { + "name": "TakeManySparseFromTensorsMap", + "schema": { + "attributes": [ + { + "description": "The `dtype` of the `SparseTensor` objects stored in the\n`SparseTensorsMap`.", + "name": "dtype", + "type": "type" + }, + { + "default": "", + "description": "The container name for the `SparseTensorsMap` read by this op.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "The shared name for the `SparseTensorsMap` read by this op.\nIt should not be blank; rather the `shared_name` or unique Operation name\nof the Op that created the original `SparseTensorsMap` should be used.", + "name": "shared_name", + "type": "string" + } + ], + "description": "The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where\n`N` is the minibatch size and the rows correspond to the output handles of\n`AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the\noriginal `SparseTensor` objects that went into the given input ops must all\nmatch. When the final `SparseTensor` is created, it has rank one\nhigher than the ranks of the incoming `SparseTensor` objects\n(they have been concatenated along a new row dimension on the left).\n\nThe output `SparseTensor` object's shape values for all dimensions but the\nfirst are the max across the input `SparseTensor` objects' shape values\nfor the corresponding dimensions. Its first shape value is `N`, the minibatch\nsize.\n\nThe input `SparseTensor` objects' indices are assumed ordered in\nstandard lexicographic order. 
If this is not the case, after this\nstep run `SparseReorder` to restore index ordering.\n\nFor example, if the handles represent an input, which is a `[2, 3]` matrix\nrepresenting two original `SparseTensor` objects:\n\n```\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n```\n\nand\n\n```\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n```\n\nthen the final `SparseTensor` will be:\n\n```\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]\n```", + "inputs": [ + { + "description": "1-D, The `N` serialized `SparseTensor` objects.\nShape: `[N]`.", + "name": "sparse_handles", + "type": 9 + } + ], + "outputs": [ + { + "description": "2-D. The `indices` of the minibatch `SparseTensor`.", + "name": "sparse_indices", + "type": 9 + }, + { + "description": "1-D. The `values` of the minibatch `SparseTensor`.", + "name": "sparse_values", + "typeAttr": "dtype" + }, + { + "description": "1-D. The `shape` of the minibatch `SparseTensor`.", + "name": "sparse_shape", + "type": 9 + } + ], + "summary": "Read `SparseTensors` from a `SparseTensorsMap` and concatenate them." 
+ } + }, + { + "name": "TakeWhileDataset", + "schema": { + "attributes": [ + { + "description": "A function returning a scalar boolean.", + "name": "predicate", + "type": "function" + }, + { + "minimum": 0, + "name": "Targuments", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "description": "The `predicate` function must return a scalar boolean and accept the\nfollowing arguments:\n\n* One tensor for each component of an element of `input_dataset`.\n* One tensor for each value in `other_arguments`.", + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A list of tensors, typically values that were captured when\nbuilding a closure for `predicate`.", + "name": "other_arguments", + "typeListAttr": "Targuments" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that stops iteration when `predicate` is false." + } + }, + { + "name": "Tan", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes tangent of every\n element in the tensor. Input range is `(-inf, inf)` and\n output range is `(-inf, inf)`. If input lies outside the boundary, `nan`\n is returned.\n\n ```python\n x = tf.constant([-float(\"inf\"), -9, -0.5, 1, 1.2, 200, 10000, float(\"inf\")])\n tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes tan of x element-wise." 
+ } + }, + { + "name": "Tanh", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Given an input tensor, this function computes hyperbolic tangent of every\n element in the tensor. Input range is `[-inf, inf]` and\n output range is `[-1,1]`.\n\n ```python\n x = tf.constant([-float(\"inf\"), -5, -0.5, 1, 1.2, 2, 3, float(\"inf\")])\n tf.math.tanh(x) ==> [-1. -0.99990916 -0.46211717 0.7615942 0.8336547 0.9640276 0.9950547 1.]\n ```", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Computes hyperbolic tangent of `x` element-wise." + } + }, + { + "name": "TanhGrad", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`\nis the corresponding input gradient.", + "inputs": [ + { + "name": "y", + "typeAttr": "T" + }, + { + "name": "dy", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Computes the gradient for the tanh of `x` wrt its input." + } + }, + { + "name": "TemporaryVariable", + "schema": { + "attributes": [ + { + "description": "The shape of the variable tensor.", + "name": "shape", + "type": "shape" + }, + { + "description": "The type of elements in the variable tensor.", + "name": "dtype", + "type": "type" + }, + { + "default": "", + "description": "Overrides the name used for the temporary variable resource. 
Default\nvalue is the name of the 'TemporaryVariable' op (which is guaranteed unique).", + "name": "var_name", + "type": "string" + } + ], + "description": "This is an experimental op for internal use only and it is possible to use this\nop in unsafe ways. DO NOT USE unless you fully understand the risks.\n\nIt is the caller's responsibility to ensure that 'ref' is eventually passed to a\nmatching 'DestroyTemporaryVariable' op after all other uses have completed.\n\nOutputs a ref to the tensor state so it may be read or modified.\n\n E.g.\n var = state_ops._temporary_variable([1, 2], types.float_)\n var_name = var.op.name\n var = state_ops.assign(var, [[4.0, 5.0]])\n var = state_ops.assign_add(var, [[6.0, 7.0]])\n final = state_ops._destroy_temporary_variable(var, var_name=var_name)", + "outputs": [ + { + "description": "A reference to the variable tensor.", + "isRef": true, + "name": "ref", + "typeAttr": "dtype" + } + ], + "summary": "Returns a tensor that may be mutated, but only persists within a single step." + } + }, + { + "name": "TensorArray", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": false, + "name": "dynamic_size", + "type": "boolean" + }, + { + "default": true, + "name": "clear_after_read", + "type": "boolean" + }, + { + "default": "", + "name": "tensor_array_name", + "type": "string" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "element_shape", + "type": "shape" + } + ], + "inputs": [ + { + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + } + ] + } + }, + { + "name": "TensorArrayClose", + "schema": { + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + } + ] + } + }, + { + "name": "TensorArrayCloseV2", + "schema": { + "inputs": [ + { + "name": "handle", + "type": 7 + } + ], + "summary": "Deprecated. 
Use TensorArrayCloseV3" + } + }, + { + "name": "TensorArrayCloseV3", + "schema": { + "description": "This enables the user to close and release the resource in the middle\nof a step/run.", + "inputs": [ + { + "description": "The handle to a TensorArray (output of TensorArray or TensorArrayGrad).", + "name": "handle", + "type": 20 + } + ], + "summary": "Delete the TensorArray from its resource container." + } + }, + { + "name": "TensorArrayConcat", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "element_shape_except0", + "type": "shape" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + }, + { + "name": "lengths", + "type": 9 + } + ] + } + }, + { + "name": "TensorArrayConcatV2", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "element_shape_except0", + "type": "shape" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + }, + { + "name": "lengths", + "type": 9 + } + ], + "summary": "Deprecated. Use TensorArrayConcatV3" + } + }, + { + "name": "TensorArrayConcatV3", + "schema": { + "attributes": [ + { + "description": "The type of the elem that is returned.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "description": "The expected shape of an element, if known,\nexcluding the first dimension. Used to validate the shapes of\nTensorArray elements. 
If this shape is not fully specified, concatenating\nzero-size TensorArrays is an error.", + "name": "element_shape_except0", + "type": "shape" + } + ], + "description": "Takes `T` elements of shapes\n\n ```\n (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)\n ```\n\nand concatenates them into a Tensor of shape:\n\n ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```\n\nAll elements must have the same shape (excepting the first dimension).", + "inputs": [ + { + "description": "The handle to a TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "description": "All of the elements in the TensorArray, concatenated along the first\naxis.", + "name": "value", + "typeAttr": "dtype" + }, + { + "description": "A vector of the row sizes of the original T elements in the\nvalue output. In the example above, this would be the values:\n`(n1, n2, ..., n(T-1))`.", + "name": "lengths", + "type": 9 + } + ], + "summary": "Concat the elements from the TensorArray into value `value`." + } + }, + { + "name": "TensorArrayGather", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "element_shape", + "type": "shape" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + } + }, + { + "name": "TensorArrayGatherV2", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" 
+ }, + "name": "element_shape", + "type": "shape" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Deprecated. Use TensorArrayGatherV3" + } + }, + { + "name": "TensorArrayGatherV3", + "schema": { + "attributes": [ + { + "description": "The type of the elem that is returned.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "description": "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error.", + "name": "element_shape", + "type": "shape" + } + ], + "description": "All elements selected by `indices` must have the same shape.", + "inputs": [ + { + "description": "The handle to a TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "The locations in the TensorArray from which to read tensor elements.", + "name": "indices", + "type": 3 + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "description": "All of the elements in the TensorArray, concatenated along a new\naxis (the new dimension 0).", + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Gather specific elements from the TensorArray into output `value`." 
+ } + }, + { + "name": "TensorArrayGrad", + "schema": { + "attributes": [ + { + "name": "source", + "type": "string" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "isRef": true, + "name": "grad_handle", + "type": 7 + } + ] + } + }, + { + "name": "TensorArrayGradV2", + "schema": { + "attributes": [ + { + "name": "source", + "type": "string" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "grad_handle", + "type": 7 + } + ], + "summary": "Deprecated. Use TensorArrayGradV3" + } + }, + { + "name": "TensorArrayGradV3", + "schema": { + "attributes": [ + { + "description": "The gradient source string, used to decide which gradient TensorArray\nto return.", + "name": "source", + "type": "string" + } + ], + "description": "If the given TensorArray gradient already exists, returns a reference to it.\n\nLocks the size of the original TensorArray by disabling its dynamic size flag.\n\n**A note about the input flow_in:**\n\nThe handle flow_in forces the execution of the gradient lookup to occur\nonly after certain other operations have occurred. For example, when\nthe forward TensorArray is dynamically sized, writes to this TensorArray\nmay resize the object. The gradient TensorArray is statically sized based\non the size of the forward TensorArray when this operation executes.\nFurthermore, the size of the forward TensorArray is frozen by this call.\nAs a result, the flow is used to ensure that the call to generate the gradient\nTensorArray only happens after all writes are executed.\n\nIn the case of dynamically sized TensorArrays, gradient computation should\nonly be performed on read operations that have themselves been chained via\nflow to occur only after all writes have executed. 
That way the final size\nof the forward TensorArray is known when this operation is called.\n\n**A note about the source attribute:**\n\nTensorArray gradient calls use an accumulator TensorArray object. If\nmultiple gradients are calculated and run in the same session, the multiple\ngradient nodes may accidentally flow through the same accumulator TensorArray.\nThis double counts and generally breaks the TensorArray gradient flow.\n\nThe solution is to identify which gradient call this particular\nTensorArray gradient is being called in. This is performed by identifying\na unique string (e.g. \"gradients\", \"gradients_1\", ...) from the input\ngradient Tensor's name. This string is used as a suffix when creating\nthe TensorArray gradient object here (the attribute `source`).\n\nThe attribute `source` is added as a suffix to the forward TensorArray's\nname when performing the creation / lookup, so that each separate gradient\ncalculation gets its own TensorArray accumulator.", + "inputs": [ + { + "description": "The handle to the forward TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "grad_handle", + "type": 20 + }, + { + "name": "flow_out", + "type": 1 + } + ], + "summary": "Creates a TensorArray for storing the gradients of values in the given handle." + } + }, + { + "name": "TensorArrayGradWithShape", + "schema": { + "attributes": [ + { + "description": "The gradient source string, used to decide which gradient TensorArray\nto return.", + "name": "source", + "type": "string" + } + ], + "description": "Similar to TensorArrayGradV3. However it creates an accumulator with an\nexpanded shape compared to the input TensorArray whose gradient is being\ncomputed. 
This enables multiple gradients for the same TensorArray to be\ncalculated using the same accumulator.", + "inputs": [ + { + "description": "The handle to the forward TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + }, + { + "description": "An int32 vector representing a shape. Elements in the gradient accumulator will\nhave shape which is this shape_to_prepend value concatenated with shape of the\nelements in the TensorArray corresponding to the input handle.", + "name": "shape_to_prepend", + "type": 3 + } + ], + "outputs": [ + { + "name": "grad_handle", + "type": 20 + }, + { + "name": "flow_out", + "type": 1 + } + ], + "summary": "Creates a TensorArray for storing multiple gradients of values in the given handle." + } + }, + { + "name": "TensorArrayPack", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "element_shape", + "type": "shape" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + } + }, + { + "name": "TensorArrayRead", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ] + } + }, + { + "name": "TensorArrayReadV2", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Deprecated. 
Use TensorArrayReadV3" + } + }, + { + "name": "TensorArrayReadV3", + "schema": { + "attributes": [ + { + "description": "The type of the elem that is returned.", + "name": "dtype", + "type": "type" + } + ], + "inputs": [ + { + "description": "The handle to a TensorArray.", + "name": "handle", + "type": 20 + }, + { + "name": "index", + "type": 3 + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "description": "The tensor that is read from the TensorArray.", + "name": "value", + "typeAttr": "dtype" + } + ], + "summary": "Read an element from the TensorArray into output `value`." + } + }, + { + "name": "TensorArrayScatter", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + } + }, + { + "name": "TensorArrayScatterV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ], + "summary": "Deprecated. 
Use TensorArrayScatterV3" + } + }, + { + "name": "TensorArrayScatterV3", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "`indices` must be a vector, its length must match the first dim of `value`.", + "inputs": [ + { + "description": "The handle to a TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "The locations at which to write the tensor elements.", + "name": "indices", + "type": 3 + }, + { + "description": "The concatenated tensor to write to the TensorArray.", + "name": "value", + "typeAttr": "T" + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_out", + "type": 1 + } + ], + "summary": "Scatter the data from the input value into specific TensorArray elements." + } + }, + { + "name": "TensorArraySize", + "schema": { + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ] + } + }, + { + "name": "TensorArraySizeV2", + "schema": { + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "size", + "type": 3 + } + ], + "summary": "Deprecated. Use TensorArraySizeV3" + } + }, + { + "name": "TensorArraySizeV3", + "schema": { + "inputs": [ + { + "description": "The handle to a TensorArray (output of TensorArray or TensorArrayGrad).", + "name": "handle", + "type": 20 + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "description": "The current size of the TensorArray.", + "name": "size", + "type": 3 + } + ], + "summary": "Get the current size of the TensorArray." 
+ } + }, + { + "name": "TensorArraySplit", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "lengths", + "type": 9 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + } + }, + { + "name": "TensorArraySplitV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "lengths", + "type": 9 + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ], + "summary": "Deprecated. Use TensorArraySplitV3" + } + }, + { + "name": "TensorArraySplitV3", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Assuming that `lengths` takes on values\n\n ```(n0, n1, ..., n(T-1))```\n\nand that `value` has shape\n\n ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,\n\nthis splits values into a TensorArray with T tensors.\n\nTensorArray index t will be the subtensor of values with starting position\n\n ```(n0 + n1 + ... 
+ n(t-1), 0, 0, ...)```\n\nand having size\n\n ```nt x d0 x d1 x ...```", + "inputs": [ + { + "description": "The handle to a TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "The concatenated tensor to write to the TensorArray.", + "name": "value", + "typeAttr": "T" + }, + { + "description": "The vector of lengths, how to split the rows of value into the\nTensorArray.", + "name": "lengths", + "type": 9 + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_out", + "type": 1 + } + ], + "summary": "Split the data from the input value into TensorArray elements." + } + }, + { + "name": "TensorArrayUnpack", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + } + }, + { + "name": "TensorArrayV2", + "schema": { + "attributes": [ + { + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" + }, + "name": "element_shape", + "type": "shape" + }, + { + "default": false, + "name": "dynamic_size", + "type": "boolean" + }, + { + "default": true, + "name": "clear_after_read", + "type": "boolean" + }, + { + "default": "", + "name": "tensor_array_name", + "type": "string" + } + ], + "inputs": [ + { + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 7 + } + ], + "summary": "Deprecated. Use TensorArrayV3" + } + }, + { + "name": "TensorArrayV3", + "schema": { + "attributes": [ + { + "description": "The type of the elements on the tensor_array.", + "name": "dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" 
+ }, + "description": "The expected shape of an element, if known. Used to\nvalidate the shapes of TensorArray elements. If this shape is not\nfully specified, gathering zero-size TensorArrays is an error.", + "name": "element_shape", + "type": "shape" + }, + { + "default": false, + "description": "A boolean that determines whether writes to the TensorArray\nare allowed to grow the size. By default, this is not allowed.", + "name": "dynamic_size", + "type": "boolean" + }, + { + "default": true, + "description": "If true (default), Tensors in the TensorArray are cleared\nafter being read. This disables multiple read semantics but allows early\nrelease of memory.", + "name": "clear_after_read", + "type": "boolean" + }, + { + "default": false, + "description": "If true (default is false), then all\nelements in the TensorArray will be expected to have have identical shapes.\nThis allows certain behaviors, like dynamically checking for\nconsistent shapes on write, and being able to fill in properly\nshaped zero tensors on stack -- even if the element_shape attribute\nis not fully defined.", + "name": "identical_element_shapes", + "type": "boolean" + }, + { + "default": "", + "description": "Overrides the name used for the temporary tensor_array\nresource. Default value is the name of the 'TensorArray' op (which\nis guaranteed unique).", + "name": "tensor_array_name", + "type": "string" + } + ], + "description": "Write data via Write and read via Read or Pack.", + "inputs": [ + { + "description": "The size of the array.", + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "description": "The handle to the TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "A scalar used to control gradient flow.", + "name": "flow", + "type": 1 + } + ], + "summary": "An array of Tensors of given size." 
+ } + }, + { + "name": "TensorArrayWrite", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "isRef": true, + "name": "handle", + "type": 7 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ] + } + }, + { + "name": "TensorArrayWriteV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "handle", + "type": 7 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "value", + "typeAttr": "T" + }, + { + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "name": "flow_out", + "type": 1 + } + ], + "summary": "Deprecated. Use TensorArrayGradV3" + } + }, + { + "name": "TensorArrayWriteV3", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "The handle to a TensorArray.", + "name": "handle", + "type": 20 + }, + { + "description": "The position to write to inside the TensorArray.", + "name": "index", + "type": 3 + }, + { + "description": "The tensor to write to the TensorArray.", + "name": "value", + "typeAttr": "T" + }, + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_in", + "type": 1 + } + ], + "outputs": [ + { + "description": "A float scalar that enforces proper chaining of operations.", + "name": "flow_out", + "type": 1 + } + ], + "summary": "Push an element onto the tensor_array." 
+ } + }, + { + "name": "TensorDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Toutput_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that emits `components` as a tuple of tensors once." + } + }, + { + "name": "TensorForestCreateTreeVariable", + "schema": { + "inputs": [ + { + "description": "Handle to the tree resource to be created.", + "name": "tree_handle", + "type": 20 + }, + { + "description": "Serialized proto string of the boosted_trees.Tree.", + "name": "tree_config", + "type": 7 + } + ], + "summary": "Creates a tree resource and returns a handle to it." + } + }, + { + "name": "TensorForestTreeDeserialize", + "schema": { + "inputs": [ + { + "description": "Handle to the tree resource to be restored.", + "name": "tree_handle", + "type": 20 + }, + { + "description": "Serialied proto string of the boosted_trees.Tree proto.", + "name": "tree_config", + "type": 7 + } + ], + "summary": "Deserializes a proto into the tree handle" + } + }, + { + "name": "TensorForestTreeIsInitializedOp", + "schema": { + "inputs": [ + { + "description": "Handle to the tree.", + "name": "tree_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "Whether the tree is initialized.", + "name": "is_initialized", + "type": 10 + } + ], + "summary": "Checks whether a tree has been initialized." 
+ } + }, + { + "name": "TensorForestTreePredict", + "schema": { + "attributes": [ + { + "description": "Scalar, dimension of the logits.", + "name": "logits_dimension", + "type": "int64" + } + ], + "inputs": [ + { + "description": "Handle to the tree resource.", + "name": "tree_handle", + "type": 20 + }, + { + "description": "Rank 2 dense features tensor.", + "name": "dense_features", + "type": 1 + } + ], + "outputs": [ + { + "description": "The logits predictions from the tree for each instance in the batch.", + "name": "logits", + "type": 1 + } + ], + "summary": "Output the logits for the given input data" + } + }, + { + "name": "TensorForestTreeResourceHandleOp", + "schema": { + "attributes": [ + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "name": "resource", + "type": 20 + } + ], + "summary": "Creates a handle to a TensorForestTreeResource" + } + }, + { + "name": "TensorForestTreeSerialize", + "schema": { + "inputs": [ + { + "description": "Handle to the tree resource to be serialized.", + "name": "tree_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "Serialied proto string of the tree resource.", + "name": "tree_config", + "type": 7 + } + ], + "summary": "Serializes the tree handle to a proto" + } + }, + { + "name": "TensorForestTreeSize", + "schema": { + "inputs": [ + { + "description": "Handle to the tree resource.", + "name": "tree_handle", + "type": 20 + } + ], + "outputs": [ + { + "description": "The size of the tree.", + "name": "tree_size", + "type": 3 + } + ], + "summary": "Get the number of nodes in a tree" + } + }, + { + "name": "TensorListConcat", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "default": { + "type": "shape", + "value": "?" 
+ }, + "name": "element_shape", + "type": "shape" + } + ], + "description": "Requires that all tensors have the same shape except the first dimension.\n\ninput_handle: The input list.\ntensor: The concated result.\nlengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.\n", + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "lengths", + "type": 9 + } + ], + "summary": "Concats all tensors in the list along the 0th dimension." + } + }, + { + "name": "TensorListConcatLists", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_a", + "type": 21 + }, + { + "name": "input_b", + "type": 21 + } + ], + "outputs": [ + { + "name": "output", + "type": 21 + } + ] + } + }, + { + "name": "TensorListConcatV2", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": "Requires that all tensors have the same shape except the first dimension.\n\ninput_handle: The input list.\nelement_shape: The shape of the uninitialized elements in the list. If the first\n dimension is not -1, it is assumed that all list elements have the same\n leading dim.\nleading_dims: The list of leading dims of uninitialized list elements. 
Used if\n the leading dim of input_handle.element_shape or the element_shape input arg\n is not already set.\ntensor: The concated result.\nlengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.\n", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "leading_dims", + "type": 9 + } + ], + "outputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "lengths", + "type": 9 + } + ], + "summary": "Concats all tensors in the list along the 0th dimension." + } + }, + { + "name": "TensorListElementShape", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": " input_handle: the list\n element_shape: the shape of elements of the list", + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "element_shape", + "typeAttr": "shape_type" + } + ], + "summary": "The shape of the elements of the given list, as a tensor." + } + }, + { + "name": "TensorListFromTensor", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": "Each tensor in the result list corresponds to one row of the input tensor.\n\ntensor: The input tensor.\noutput_handle: The list.", + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Creates a TensorList which, when stacked, has the value of `tensor`." 
+ } + }, + { + "name": "TensorListGather", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "description": "Each row in the produced Tensor corresponds to the element in the TensorList\nspecified by the given index (see `tf.gather`).\n\ninput_handle: The input tensor list.\nindices: The indices used to index into the list.\nvalues: The tensor.", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "values", + "typeAttr": "element_dtype" + } + ], + "summary": "Creates a Tensor by indexing into the TensorList." + } + }, + { + "name": "TensorListGetItem", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "item", + "typeAttr": "element_dtype" + } + ] + } + }, + { + "name": "TensorListLength", + "schema": { + "description": "input_handle: the input list\nlength: the number of tensors in the list", + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "length", + "type": 3 + } + ], + "summary": "Returns the number of tensors in the input tensor list." 
+ } + }, + { + "name": "TensorListPopBack", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "description": "Fails if the list is empty.\n\ninput_handle: the input list\ntensor: the withdrawn last element of the list\nelement_dtype: the type of elements in the list\nelement_shape: the shape of the output tensor", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ], + "summary": "Returns the last element of the input list as well as a list with all but that element." + } + }, + { + "name": "TensorListPushBack", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "description": "tensor: The tensor to put on the list.\ninput_handle: The old list.\noutput_handle: A list with the elements of the old list followed by tensor.\nelement_dtype: the type of elements in the list.\nelement_shape: a shape compatible with that of elements in the list.", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`." 
+ } + }, + { + "name": "TensorListPushBackBatch", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "inputs": [ + { + "name": "input_handles", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ], + "outputs": [ + { + "name": "output_handles", + "type": 21 + } + ] + } + }, + { + "name": "TensorListReserve", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": "element_shape: the shape of the future elements of the list\nnum_elements: the number of elements to reserve\nhandle: the output list\nelement_dtype: the desired type of elements in the list.", + "inputs": [ + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "num_elements", + "type": 3 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "List of the given size with empty elements." + } + }, + { + "name": "TensorListResize", + "schema": { + "description": "\ninput_handle: the input list\nsize: size of the output list\n", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "size", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Resizes the list." 
+ } + }, + { + "name": "TensorListScatter", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": "Each member of the TensorList corresponds to one row of the input tensor,\nspecified by the given index (see `tf.gather`).\n\ntensor: The input tensor.\nindices: The indices used to index into the list.\nelement_shape: The shape of the elements in the list (can be less specified than\n the shape of the tensor).\noutput_handle: The TensorList.", + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Creates a TensorList by indexing into a Tensor." + } + }, + { + "name": "TensorListScatterIntoExistingList", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "description": "Each member of the TensorList corresponds to one row of the input tensor,\nspecified by the given index (see `tf.gather`).\n\ninput_handle: The list to scatter into.\ntensor: The input tensor.\nindices: The indices used to index into the list.\noutput_handle: The TensorList.", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "indices", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Scatters tensor at indices in an input list." 
+ } + }, + { + "name": "TensorListScatterV2", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": "Each member of the TensorList corresponds to one row of the input tensor,\nspecified by the given index (see `tf.gather`).\n\ntensor: The input tensor.\nindices: The indices used to index into the list.\nelement_shape: The shape of the elements in the list (can be less specified than\n the shape of the tensor).\nnum_elements: The size of the output list. Must be large enough to accommodate\n the largest index in indices. If -1, the list is just large enough to include\n the largest index in indices.\noutput_handle: The TensorList.", + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "indices", + "type": 3 + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "num_elements", + "type": 3 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Creates a TensorList by indexing into a Tensor." + } + }, + { + "name": "TensorListSetItem", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + } + ], + "description": "input_handle: the list\nindex: the position in the list to which the tensor will be assigned\nitem: the element to be assigned to that position\noutput_handle: the new list, with the element in the proper position\n", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "index", + "type": 3 + }, + { + "name": "item", + "typeAttr": "element_dtype" + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Sets the index-th position of the list to contain the given tensor." 
+ } + }, + { + "name": "TensorListSplit", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "shape_type", + "type": "type" + } + ], + "description": "list[i] corresponds to lengths[i] tensors from the input tensor.\nThe tensor must have rank at least 1 and contain exactly sum(lengths) elements.\n\ntensor: The input tensor.\nelement_shape: A shape compatible with that of elements in the tensor.\nlengths: Vector of sizes of the 0th dimension of tensors in the list.\noutput_handle: The list.", + "inputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + }, + { + "name": "element_shape", + "typeAttr": "shape_type" + }, + { + "name": "lengths", + "type": 9 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ], + "summary": "Splits a tensor into a list." + } + }, + { + "name": "TensorListStack", + "schema": { + "attributes": [ + { + "name": "element_dtype", + "type": "type" + }, + { + "default": -1, + "name": "num_elements", + "type": "int64" + } + ], + "description": "Requires that all tensors have the same shape.\n\ninput_handle: the input list\ntensor: the gathered result\nnum_elements: optional. If not -1, the number of elements in the list.\n", + "inputs": [ + { + "name": "input_handle", + "type": 21 + }, + { + "name": "element_shape", + "type": 3 + } + ], + "outputs": [ + { + "name": "tensor", + "typeAttr": "element_dtype" + } + ], + "summary": "Stacks all tensors in the list." 
+ } + }, + { + "name": "TensorScatterAdd", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation creates a new tensor by adding sparse `updates` to the passed\nin `tensor`.\nThis operation is very similar to `tf.scatter_nd_add`, except that the updates\nare added onto an existing tensor (as opposed to a variable). If the memory\nfor the existing tensor cannot be re-used, a copy is made and updated.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`tensor.shape`. The last dimension of `indices` can be at most the rank of\n`tensor.shape`:\n\n indices.shape[-1] <= tensor.shape.rank\n\nThe last dimension of `indices` corresponds to indices into elements\n(if `indices.shape[-1] = tensor.shape.rank`) or slices\n(if `indices.shape[-1] < tensor.shape.rank`) along dimension\n`indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + tensor.shape[indices.shape[-1]:]\n\nThe simplest form of tensor_scatter_add is to add individual elements to a\ntensor by index. For example, say we want to add 4 elements in a rank-1\ntensor with 8 elements.\n\nIn Python, this scatter add operation would look like this:\n\n```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n tensor = tf.ones([8], dtype=tf.int32)\n updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n print(updated)\n```\n\nThe resulting tensor would look like this:\n\n [1, 12, 1, 11, 10, 1, 1, 13]\n\nWe can also, insert entire slices of a higher rank tensor all at once. 
For\nexample, if we wanted to insert two slices in the first dimension of a\nrank-3 tensor with two matrices of new values.\n\nIn Python, this scatter add operation would look like this:\n\n```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n tensor = tf.ones([4, 4, 4],dtype=tf.int32)\n updated = tf.tensor_scatter_nd_add(tensor, indices, updates)\n print(updated)\n```\n\nThe resulting tensor would look like this:\n\n [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],\n [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, the index is ignored.", + "inputs": [ + { + "description": "Tensor to copy/update.", + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "Index tensor.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Updates to scatter into output.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A new tensor copied from tensor and updates added according to the indices.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Adds sparse `updates` to an existing tensor according to `indices`." + } + }, + { + "name": "TensorScatterSub", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation creates a new tensor by subtracting sparse `updates` from the\npassed in `tensor`.\nThis operation is very similar to `tf.scatter_nd_sub`, except that the updates\nare subtracted from an existing tensor (as opposed to a variable). 
If the memory\nfor the existing tensor cannot be re-used, a copy is made and updated.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`shape`. The last dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\nThe last dimension of `indices` corresponds to indices into elements\n(if `indices.shape[-1] = shape.rank`) or slices\n(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n`shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\nThe simplest form of tensor_scatter_sub is to subtract individual elements\nfrom a tensor by index. For example, say we want to insert 4 scattered elements\nin a rank-1 tensor with 8 elements.\n\nIn Python, this scatter subtract operation would look like this:\n\n```python\n indices = tf.constant([[4], [3], [1], [7]])\n updates = tf.constant([9, 10, 11, 12])\n tensor = tf.ones([8], dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n```\n\nThe resulting tensor would look like this:\n\n [1, -10, 1, -9, -8, 1, 1, -11]\n\nWe can also, insert entire slices of a higher rank tensor all at once. 
For\nexample, if we wanted to insert two slices in the first dimension of a\nrank-3 tensor with two matrices of new values.\n\nIn Python, this scatter add operation would look like this:\n\n```python\n indices = tf.constant([[0], [2]])\n updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]],\n [[5, 5, 5, 5], [6, 6, 6, 6],\n [7, 7, 7, 7], [8, 8, 8, 8]]])\n tensor = tf.ones([4, 4, 4],dtype=tf.int32)\n updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)\n print(updated)\n```\n\nThe resulting tensor would look like this:\n\n [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],\n [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],\n [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, the index is ignored.", + "inputs": [ + { + "description": "Tensor to copy/update.", + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "Index tensor.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Updates to scatter into output.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A new tensor copied from tensor and updates subtracted according to the indices.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Subtracts sparse `updates` from an existing tensor according to `indices`." 
+ } + }, + { + "name": "TensorScatterUpdate", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + } + ], + "description": "This operation creates a new tensor by applying sparse `updates` to the passed\nin `tensor`.\nThis operation is very similar to `tf.scatter_nd`, except that the updates are\nscattered onto an existing tensor (as opposed to a zero-tensor). If the memory\nfor the existing tensor cannot be re-used, a copy is made and updated.\n\nIf `indices` contains duplicates, then their updates are accumulated (summed).\n\n**WARNING**: The order in which updates are applied is nondeterministic, so the\noutput will be nondeterministic if `indices` contains duplicates -- because\nof some numerical approximation issues, numbers summed in different order\nmay yield different results.\n\n`indices` is an integer tensor containing indices into a new tensor of shape\n`shape`. The last dimension of `indices` can be at most the rank of `shape`:\n\n indices.shape[-1] <= shape.rank\n\nThe last dimension of `indices` corresponds to indices into elements\n(if `indices.shape[-1] = shape.rank`) or slices\n(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of\n`shape`. `updates` is a tensor with shape\n\n indices.shape[:-1] + shape[indices.shape[-1]:]\n\nThe simplest form of scatter is to insert individual elements in a tensor by\nindex. For example, say we want to insert 4 scattered elements in a rank-1\ntensor with 8 elements.\n\n
    \n\n
    \n\nIn Python, this scatter operation would look like this:\n\n >>> indices = tf.constant([[4], [3], [1], [7]])\n >>> updates = tf.constant([9, 10, 11, 12])\n >>> tensor = tf.ones([8], dtype=tf.int32)\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))\n tf.Tensor([ 1 11 1 10 9 1 1 12], shape=(8,), dtype=int32)\n\nWe can also, insert entire slices of a higher rank tensor all at once. For\nexample, if we wanted to insert two slices in the first dimension of a\nrank-3 tensor with two matrices of new values.\n\nIn Python, this scatter operation would look like this:\n\n >>> indices = tf.constant([[0], [2]])\n >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],\n ... [7, 7, 7, 7], [8, 8, 8, 8]],\n ... [[5, 5, 5, 5], [6, 6, 6, 6],\n ... [7, 7, 7, 7], [8, 8, 8, 8]]])\n >>> tensor = tf.ones([4, 4, 4], dtype=tf.int32)\n >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates).numpy())\n [[[5 5 5 5]\n [6 6 6 6]\n [7 7 7 7]\n [8 8 8 8]]\n [[1 1 1 1]\n [1 1 1 1]\n [1 1 1 1]\n [1 1 1 1]]\n [[5 5 5 5]\n [6 6 6 6]\n [7 7 7 7]\n [8 8 8 8]]\n [[1 1 1 1]\n [1 1 1 1]\n [1 1 1 1]\n [1 1 1 1]]]\n\nNote that on CPU, if an out of bound index is found, an error is returned.\nOn GPU, if an out of bound index is found, the index is ignored.", + "inputs": [ + { + "description": "Tensor to copy/update.", + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "Index tensor.", + "name": "indices", + "typeAttr": "Tindices" + }, + { + "description": "Updates to scatter into output.", + "name": "updates", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A new tensor with the given shape and updates applied according\nto the indices.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Scatter `updates` into an existing tensor according to `indices`." 
+ } + }, + { + "name": "TensorSliceDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "Toutput_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "components", + "typeListAttr": "Toutput_types" + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that emits each dim-0 slice of `components` once." + } + }, + { + "name": "TensorStridedSliceUpdate", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Index", + "type": "type" + }, + { + "default": 0, + "name": "begin_mask", + "type": "int64" + }, + { + "default": 0, + "name": "end_mask", + "type": "int64" + }, + { + "default": 0, + "name": "ellipsis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "new_axis_mask", + "type": "int64" + }, + { + "default": 0, + "name": "shrink_axis_mask", + "type": "int64" + } + ], + "description": "The values of `value` are assigned to the positions in the tensor `input` that\nare selected by the slice parameters. The slice parameters `begin` `end`\n`strides` etc. work exactly as in `StridedSlice`.\n\nNOTE this op currently does not support broadcasting and so `value`'s shape\nmust be exactly the shape produced by the slice of `input`.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "begin", + "typeAttr": "Index" + }, + { + "name": "end", + "typeAttr": "Index" + }, + { + "name": "strides", + "typeAttr": "Index" + }, + { + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Assign `value` to the sliced l-value reference of `input`." 
+ } + }, + { + "name": "TensorSummary", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": "", + "description": "A json-encoded SummaryDescription proto.", + "name": "description", + "type": "string" + }, + { + "default": [], + "description": "An unused list of strings.", + "name": "labels", + "type": "string[]" + }, + { + "default": "", + "description": "An unused string.", + "name": "display_name", + "type": "string" + } + ], + "description": "This op is being phased out in favor of TensorSummaryV2, which lets callers pass\na tag as well as a serialized SummaryMetadata proto string that contains\nplugin-specific data. We will keep this op to maintain backwards compatibility.", + "inputs": [ + { + "description": "A tensor to serialize.", + "name": "tensor", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ], + "summary": "Outputs a `Summary` protocol buffer with a tensor." + } + }, + { + "name": "TensorSummaryV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "A string attached to this summary. Used for organization in TensorBoard.", + "name": "tag", + "type": 7 + }, + { + "description": "A tensor to serialize.", + "name": "tensor", + "typeAttr": "T" + }, + { + "description": "A serialized SummaryMetadata proto. Contains plugin\ndata.", + "name": "serialized_summary_metadata", + "type": 7 + } + ], + "outputs": [ + { + "name": "summary", + "type": 7 + } + ], + "summary": "Outputs a `Summary` protocol buffer with a tensor and per-plugin data." 
+ } + }, + { + "name": "TextLineDataset", + "schema": { + "inputs": [ + { + "description": "A scalar or a vector containing the name(s) of the file(s) to be\nread.", + "name": "filenames", + "type": 7 + }, + { + "description": "A scalar containing either (i) the empty string (no\ncompression), (ii) \"ZLIB\", or (iii) \"GZIP\".", + "name": "compression_type", + "type": 7 + }, + { + "description": "A scalar containing the number of bytes to buffer.", + "name": "buffer_size", + "type": 9 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that emits the lines of one or more text files." + } + }, + { + "name": "TextLineReader", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Number of lines to skip from the beginning of every file.", + "name": "skip_header_lines", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to reference the Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "summary": "A Reader that outputs the lines of a file delimited by '\\n'." 
+ } + }, + { + "name": "TextLineReaderV2", + "schema": { + "attributes": [ + { + "default": 0, + "description": "Number of lines to skip from the beginning of every file.", + "name": "skip_header_lines", + "type": "int64" + }, + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "The handle to reference the Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "summary": "A Reader that outputs the lines of a file delimited by '\\n'." + } + }, + { + "name": "ThreadPoolDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "A resource produced by the ThreadPoolHandle op.", + "name": "thread_pool", + "type": 20 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`." 
+ } + }, + { + "name": "ThreadPoolHandle", + "schema": { + "attributes": [ + { + "description": "The number of threads in the thread pool.", + "name": "num_threads", + "type": "int64" + }, + { + "default": 1, + "description": "The maximum degree of parallelism to use within operations that execute on this\nthreadpool.", + "name": "max_intra_op_parallelism", + "type": "int64" + }, + { + "description": "A human-readable name for the threads that may be visible in some\nvisualizations.\nthreadpool.", + "name": "display_name", + "type": "string" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "outputs": [ + { + "description": "A resource that can be consumed by one or more ExperimentalThreadPoolDataset\nops.", + "name": "handle", + "type": 20 + } + ], + "summary": "Creates a dataset that uses a custom thread pool to compute `input_dataset`." + } + }, + { + "name": "ThreadUnsafeUnigramCandidateSampler", + "schema": { + "attributes": [ + { + "description": "Number of true labels per context.", + "minimum": 1, + "name": "num_true", + "type": "int64" + }, + { + "description": "Number of candidates to randomly sample.", + "minimum": 1, + "name": "num_sampled", + "type": "int64" + }, + { + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. This requires some approximation to\nestimate the post-rejection sampling probabilities.", + "name": "unique", + "type": "boolean" + }, + { + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1, + "name": "range_max", + "type": "int64" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. 
Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "inputs": [ + { + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "name": "true_classes", + "type": 9 + } + ], + "outputs": [ + { + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "name": "sampled_candidates", + "type": 9 + }, + { + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "name": "true_expected_count", + "type": 1 + }, + { + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. If unique=true, then this is a\nprobability.", + "name": "sampled_expected_count", + "type": 1 + } + ], + "summary": "Generates labels for candidate sampling with a learned unigram distribution." 
+ } + }, + { + "name": "Tile", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tmultiples", + "type": "type" + } + ], + "description": "This operation creates a new tensor by replicating `input` `multiples` times.\nThe output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,\nand the values of `input` are replicated `multiples[i]` times along the 'i'th\ndimension. For example, tiling `[a b c d]` by `[2]` produces\n`[a b c d a b c d]`.\n\n>>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)\n>>> b = tf.constant([1,2], tf.int32)\n>>> tf.tile(a, b)\n\n>>> c = tf.constant([2,1], tf.int32)\n>>> tf.tile(a, c)\n\n>>> d = tf.constant([2,2], tf.int32)\n>>> tf.tile(a, d)\n", + "inputs": [ + { + "description": "1-D or higher.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "1-D. Length must be the same as the number of dimensions in `input`", + "name": "multiples", + "typeAttr": "Tmultiples" + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Constructs a tensor by tiling a given tensor." + } + }, + { + "name": "TileGrad", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Since `Tile` takes an input and repeats the input `multiples` times\nalong each dimension, `TileGrad` takes in `multiples` and aggregates\neach repeated tile of `input` into `output`.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + }, + { + "name": "multiples", + "type": 3 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Returns the gradient of `Tile`." 
+ } + }, + { + "name": "Timestamp", + "schema": { + "description": "Returns the timestamp as a `float64` for seconds since the Unix epoch.\n\nNote: the timestamp is computed when the op is executed, not when it is added\nto the graph.", + "outputs": [ + { + "name": "ts", + "type": 2 + } + ], + "summary": "Provides the time since epoch in seconds." + } + }, + { + "name": "ToBool", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "description": "Converts a tensor to a scalar predicate with the following rules:\n\n- For 0D tensors, truthiness is determined by comparing against a \"zero\"\n value. For numerical types it is the obvious zero. For strings it is the\n empty string.\n\n- For >0D tensors, truthiness is determined by looking at the number of\n elements. If has zero elements, then the result is false. Otherwise the\n result is true.\n\nThis matches the behavior of If and While for determining if a tensor counts\nas true/false for a branch condition.", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "output", + "type": 10 + } + ], + "summary": "Converts a tensor to a scalar predicate." + } + }, + { + "name": "TopK", + "schema": { + "attributes": [ + { + "description": "Number of top elements to look for along the last dimension (along each\nrow for matrices).", + "minimum": 0, + "name": "k", + "type": "int64" + }, + { + "default": true, + "description": "If true the resulting `k` elements will be sorted by the values in\ndescending order.", + "name": "sorted", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "If the input is a vector (rank-1), finds the `k` largest entries in the vector\nand outputs their values and indices as vectors. 
Thus `values[j]` is the\n`j`-th largest entry in `input`, and its index is `indices[j]`.\n\nFor matrices (resp. higher rank input), computes the top `k` entries in each\nrow (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\nIf two elements are equal, the lower-index element appears first.\n\nIf `k` varies dynamically, use `TopKV2` below.", + "inputs": [ + { + "description": "1-D or higher with last dimension at least `k`.", + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The `k` largest elements along each last dimensional slice.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "The indices of `values` within the last dimension of `input`.", + "name": "indices", + "type": 3 + } + ], + "summary": "Finds values and indices of the `k` largest elements for the last dimension." + } + }, + { + "name": "TopKV2", + "schema": { + "attributes": [ + { + "default": true, + "description": "If true the resulting `k` elements will be sorted by the values in\ndescending order.", + "name": "sorted", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "description": "If the input is a vector (rank-1), finds the `k` largest entries in the vector\nand outputs their values and indices as vectors. Thus `values[j]` is the\n`j`-th largest entry in `input`, and its index is `indices[j]`.\n\nFor matrices (resp. higher rank input), computes the top `k` entries in each\nrow (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\nIf two elements are equal, the lower-index element appears first.", + "inputs": [ + { + "description": "1-D or higher with last dimension at least `k`.", + "name": "input", + "typeAttr": "T" + }, + { + "description": "0-D. 
Number of top elements to look for along the last dimension (along each\nrow for matrices).", + "name": "k", + "type": 3 + } + ], + "outputs": [ + { + "description": "The `k` largest elements along each last dimensional slice.", + "name": "values", + "typeAttr": "T" + }, + { + "description": "The indices of `values` within the last dimension of `input`.", + "name": "indices", + "type": 3 + } + ], + "summary": "Finds values and indices of the `k` largest elements for the last dimension." + } + }, + { + "name": "Transpose", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tperm", + "type": "type" + } + ], + "description": "The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:\n `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "perm", + "typeAttr": "Tperm" + } + ], + "outputs": [ + { + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Shuffle dimensions of x according to a permutation." + } + }, + { + "name": "TridiagonalMatMul", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float64`, `float32`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Calculates product of two matrices, where left matrix is a tridiagonal matrix.", + "inputs": [ + { + "description": "Tensor of shape `[..., 1, M]`, representing superdiagonals of\ntri-diagonal matrices to the left of multiplication. 
Last element is ignored.", + "name": "superdiag", + "typeAttr": "T" + }, + { + "description": "Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal\nmatrices to the left of multiplication.", + "name": "maindiag", + "typeAttr": "T" + }, + { + "description": "Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal\nmatrices to the left of multiplication. First element is ignored.", + "name": "subdiag", + "typeAttr": "T" + }, + { + "description": "Tensor of shape `[..., M, N]`, representing MxN matrices to the right of\nmultiplication.", + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Tensor of shape `[..., M, N]` containing the product.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Calculate product with tridiagonal matrix." + } + }, + { + "name": "TridiagonalSolve", + "schema": { + "attributes": [ + { + "default": true, + "description": "Whether to apply partial pivoting. Partial pivoting makes the procedure more\nstable, but slower.", + "name": "partial_pivoting", + "type": "boolean" + }, + { + "description": "Must be one of the following: `float64`, `float32`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": " Solves tridiagonal systems of equations.\n Supports batch dimensions and multiple right-hand sides per each left-hand\n side.\n On CPU, solution is computed via Gaussian elimination with or without partial\n pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE\n library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv\n Partial pivoting is not yet supported by XLA backends.", + "inputs": [ + { + "description": "Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the\ntridiagonal matrices with three rows being the superdiagonal, diagonals, and\nsubdiagonals, in order. 
The last element of the superdiagonal and the first\nelement of the subdiagonal is ignored.", + "name": "diagonals", + "typeAttr": "T" + }, + { + "description": "Tensor of shape `[..., M, K]`, representing K right-hand sides per each\nleft-hand side.", + "name": "rhs", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "Tensor of shape `[..., M, K]` containing the solutions", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Solves tridiagonal systems of equations." + } + }, + { + "name": "TruncateDiv", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `bfloat16`, `float16`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "description": "Truncation designates that negative numbers will round fractional quantities\ntoward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different\nthan Python semantics. See `FloorDiv` for a division function that matches\nPython Semantics.\n\n*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns x / y element-wise for integer types." + } + }, + { + "name": "TruncateMod", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `int32`, `int64`, `bfloat16`, `float16`, `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "the result here is consistent with a truncating divide. E.g. `truncate(x / y) *\ny + truncate_mod(x, y) = x`.\n\n*NOTE*: `TruncateMod` supports broadcasting. 
More about broadcasting\n[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns element-wise remainder of division. This emulates C semantics in that" + } + }, + { + "name": "TruncatedNormal", + "schema": { + "attributes": [ + { + "default": 0, + "description": "If either `seed` or `seed2` are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "A second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + }, + { + "description": "The type of the output. Must be one of the following: `float16`, `bfloat16`, `float32`, `float64`.", + "name": "dtype", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "T", + "type": "type" + } + ], + "description": "The generated values follow a normal distribution with mean 0 and standard\ndeviation 1, except that values whose magnitude is more than 2 standard\ndeviations from the mean are dropped and re-picked.", + "inputs": [ + { + "description": "The shape of the output tensor.", + "name": "shape", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A tensor of the specified shape filled with random truncated normal\nvalues.", + "name": "output", + "typeAttr": "dtype" + } + ], + "summary": "Outputs random values from a truncated normal distribution." + } + }, + { + "name": "TryRpc", + "schema": { + "attributes": [ + { + "default": "", + "description": "RPC protocol to use. Empty string means use the default protocol.\nOptions include 'grpc'.", + "name": "protocol", + "type": "string" + }, + { + "default": true, + "description": "`boolean`. 
If `true` (default), then failures to connect\n(i.e., the server does not immediately respond) cause an RPC failure.", + "name": "fail_fast", + "type": "boolean" + }, + { + "default": 0, + "description": "`int`. If `0` (default), then the kernel will run the RPC\nrequest and only time out if the RPC deadline passes or the session times out.\nIf this value is greater than `0`, then the op will raise an exception if\nthe RPC takes longer than `timeout_in_ms`.", + "name": "timeout_in_ms", + "type": "int64" + } + ], + "description": "This op asynchronously performs either a single RPC request, or a batch\nof requests. RPC requests are defined by three main parameters:\n\n - `address` (the host+port or BNS address of the request)\n - `method` (the method name for the request)\n - `request` (the serialized proto string, or vector of strings,\n of the RPC request argument).\n\nFor example, if you have an RPC service running on port localhost:2345,\nand its interface is configured with the following proto declaration:\n\n```\nservice MyService {\n rpc MyMethod(MyRequestProto) returns (MyResponseProto) {\n }\n};\n```\n\nthen call this op with arguments:\n\n```\naddress = \"localhost:2345\"\nmethod = \"MyService/MyMethod\"\n```\n\nThe `request` tensor is a string tensor representing serialized `MyRequestProto`\nstrings; and the output string tensor `response` will have the same shape\nand contain (upon successful completion) corresponding serialized\n`MyResponseProto` strings.\n\nFor example, to send a single, empty, `MyRequestProto`, call\nthis op with `request = \"\"`. 
To send 5 **parallel** empty requests,\ncall this op with `request = [\"\", \"\", \"\", \"\", \"\"]`.\n\nMore generally, one can create a batch of `MyRequestProto` serialized protos\nfrom regular batched tensors using the `encode_proto` op, and convert\nthe response `MyResponseProto` serialized protos to batched tensors\nusing the `decode_proto` op.\n\n**NOTE** Working with serialized proto strings is faster than instantiating\nactual proto objects in memory, so no performance degradation is expected\ncompared to writing custom kernels for this workflow.\n\nUnlike the standard `Rpc` op, if the connection fails or the remote worker\nreturns an error status, this op does **not** reraise the exception.\nInstead, the `status_code` and `status_message` entry for the corresponding RPC\ncall is set with the error returned from the RPC call. The `response` tensor\nwill contain valid response values for those minibatch entries whose RPCs did\nnot fail; the rest of the entries will have empty strings.", + "inputs": [ + { + "description": "`0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server.\nIf this tensor has more than 1 element, then multiple parallel rpc requests\nare sent. This argument broadcasts with `method` and `request`.", + "name": "address", + "type": 7 + }, + { + "description": "`0-D` or `1-D`. The method address on the RPC server.\nIf this tensor has more than 1 element, then multiple parallel rpc requests\nare sent. This argument broadcasts with `address` and `request`.", + "name": "method", + "type": 7 + }, + { + "description": "`0-D` or `1-D`. Serialized proto strings: the rpc request argument.\nIf this tensor has more than 1 element, then multiple parallel rpc requests\nare sent. This argument broadcasts with `address` and `method`.", + "name": "request", + "type": 7 + } + ], + "outputs": [ + { + "description": "Same shape as `request`. 
Serialized proto strings: the rpc responses.", + "name": "response", + "type": 7 + }, + { + "description": "Same shape as `request`. Values correspond to tensorflow Status enum codes.", + "name": "status_code", + "type": 3 + }, + { + "description": "Same shape as `request`. Values correspond to Status messages\nreturned from the RPC calls.", + "name": "status_message", + "type": 7 + } + ], + "summary": "Perform batches of RPC requests." + } + }, + { + "name": "Unbatch", + "schema": { + "attributes": [ + { + "name": "timeout_micros", + "type": "int64" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "An instance of Unbatch either receives an empty batched_tensor, in which case it\nasynchronously waits until the values become available from a concurrently\nrunning instance of Unbatch with the same container and shared_name, or receives\na non-empty batched_tensor in which case it finalizes all other concurrently\nrunning instances and outputs its own element from the batch.\n\nbatched_tensor: The possibly transformed output of Batch. The size of the first\n dimension should remain unchanged by the transformations for the operation to\n work.\nbatch_index: The matching batch_index obtained from Batch.\nid: The id scalar emitted by Batch.\nunbatched_tensor: The Tensor corresponding to this execution.\ntimeout_micros: Maximum amount of time (in microseconds) to wait to receive the\n batched input tensor associated with a given invocation of the op.\ncontainer: Container to control resource sharing.\nshared_name: Instances of Unbatch with the same container and shared_name are\n assumed to possibly belong to the same batch. 
If left empty, the op name will\n be used as the shared name.", + "inputs": [ + { + "name": "batched_tensor", + "typeAttr": "T" + }, + { + "name": "batch_index", + "type": 9 + }, + { + "name": "id", + "type": 9 + } + ], + "outputs": [ + { + "name": "unbatched_tensor", + "typeAttr": "T" + } + ], + "summary": "Reverses the operation of Batch for a single output Tensor." + } + }, + { + "name": "UnbatchDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "A dataset that splits the elements of its input into multiple elements." + } + }, + { + "name": "UnbatchGrad", + "schema": { + "attributes": [ + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + }, + { + "name": "T", + "type": "type" + } + ], + "description": "Acts like Batch but using the given batch_index index of batching things as they\nbecome available. This ensures that the gradients are propagated back in the\nsame session which did the forward pass.\n\noriginal_input: The input to the Unbatch operation this is the gradient of.\nbatch_index: The batch_index given to the Unbatch operation this is the gradient\nof.\ngrad: The downstream gradient.\nid: The id scalar emitted by Batch.\nbatched_grad: The return value, either an empty tensor or the batched gradient.\ncontainer: Container to control resource sharing.\nshared_name: Instances of UnbatchGrad with the same container and shared_name\n are assumed to possibly belong to the same batch. 
If left empty, the op name\n will be used as the shared name.", + "inputs": [ + { + "name": "original_input", + "typeAttr": "T" + }, + { + "name": "batch_index", + "type": 9 + }, + { + "name": "grad", + "typeAttr": "T" + }, + { + "name": "id", + "type": 9 + } + ], + "outputs": [ + { + "name": "batched_grad", + "typeAttr": "T" + } + ], + "summary": "Gradient of Unbatch." + } + }, + { + "name": "UncompressElement", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "compressed", + "type": 21 + } + ], + "outputs": [ + { + "name": "components", + "typeListAttr": "output_types" + } + ], + "summary": "Uncompresses a compressed dataset element." + } + }, + { + "name": "UnicodeDecode", + "schema": { + "attributes": [ + { + "description": "Text encoding of the input strings. This is any of the encodings supported\nby ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`.", + "name": "input_encoding", + "type": "string" + }, + { + "default": "replace", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce a InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. Must be one of the following: `strict`, `replace`, `ignore`.", + "name": "errors", + "type": "string" + }, + { + "default": 65533, + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. 
The default value is the default unicode replacement character is\n0xFFFD or U+65533.)", + "name": "replacement_char", + "type": "int64" + }, + { + "default": false, + "description": "Whether to replace the C0 control characters (00-1F) with the\n`replacement_char`. Default is false.", + "name": "replace_control_characters", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsplits", + "type": "type" + } + ], + "description": "The character codepoints for all strings are returned using a single vector\n`char_values`, with strings expanded to characters in row-major order.\n\nThe `row_splits` tensor indicates where the codepoints for\neach input string begin and end within the `char_values` tensor.\nIn particular, the values for the `i`th\nstring (in row-major order) are stored in the slice\n`[row_splits[i]:row_splits[i+1]]`. Thus:\n\n* `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th\n character in the `i`th string (in row-major order).\n* `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th\n string (in row-major order).", + "inputs": [ + { + "description": "The text to be decoded. Can have any shape. Note that the output is flattened\nto a vector of char values.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "A 1D int32 tensor containing the row splits.", + "name": "row_splits", + "typeAttr": "Tsplits" + }, + { + "description": "A 1D int32 Tensor containing the decoded codepoints.", + "name": "char_values", + "type": 3 + } + ], + "summary": "Decodes each string in `input` into a sequence of Unicode code points." + } + }, + { + "name": "UnicodeDecodeWithOffsets", + "schema": { + "attributes": [ + { + "description": "Text encoding of the input strings. This is any of the encodings supported\nby ICU ucnv algorithmic converters. 
Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`.", + "name": "input_encoding", + "type": "string" + }, + { + "default": "replace", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce a InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. Must be one of the following: `strict`, `replace`, `ignore`.", + "name": "errors", + "type": "string" + }, + { + "default": 65533, + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default unicode replacement character is\n0xFFFD or U+65533.)", + "name": "replacement_char", + "type": "int64" + }, + { + "default": false, + "description": "Whether to replace the C0 control characters (00-1F) with the\n`replacement_char`. Default is false.", + "name": "replace_control_characters", + "type": "boolean" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsplits", + "type": "type" + } + ], + "description": "The character codepoints for all strings are returned using a single vector\n`char_values`, with strings expanded to characters in row-major order.\nSimilarly, the character start byte offsets are returned using a single vector\n`char_to_byte_starts`, with strings expanded in row-major order.\n\nThe `row_splits` tensor indicates where the codepoints and start offsets for\neach input string begin and end within the `char_values` and\n`char_to_byte_starts` tensors. 
In particular, the values for the `i`th\nstring (in row-major order) are stored in the slice\n`[row_splits[i]:row_splits[i+1]]`. Thus:\n\n* `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th\n character in the `i`th string (in row-major order).\n* `char_to_bytes_starts[row_splits[i]+j]` is the start byte offset for the `j`th\n character in the `i`th string (in row-major order).\n* `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th\n string (in row-major order).", + "inputs": [ + { + "description": "The text to be decoded. Can have any shape. Note that the output is flattened\nto a vector of char values.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "A 1D int32 tensor containing the row splits.", + "name": "row_splits", + "typeAttr": "Tsplits" + }, + { + "description": "A 1D int32 Tensor containing the decoded codepoints.", + "name": "char_values", + "type": 3 + }, + { + "description": "A 1D int32 Tensor containing the byte index in the input string where each\ncharacter in `char_values` starts.", + "name": "char_to_byte_starts", + "type": 9 + } + ], + "summary": "Decodes each string in `input` into a sequence of Unicode code points." + } + }, + { + "name": "UnicodeEncode", + "schema": { + "attributes": [ + { + "default": "replace", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce a InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. Must be one of the following: `ignore`, `replace`, `strict`.", + "name": "errors", + "type": "string" + }, + { + "description": "Unicode encoding of the output strings. 
Valid encodings are: `\"UTF-8\",\n\"UTF-16-BE\", and \"UTF-32-BE\"`. Must be one of the following: `UTF-8`, `UTF-16-BE`, `UTF-32-BE`.", + "name": "output_encoding", + "type": "string" + }, + { + "default": 65533, + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default unicode replacement character is\n0xFFFD (U+65533).", + "name": "replacement_char", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tsplits", + "type": "type" + } + ], + "description": "Returns a vector of strings, where `output[i]` is constructed by encoding the\nUnicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`\nusing `output_encoding`.\n\n---\n\nExample:\n\n```\ninput_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]\ninput_splits = [0, 5, 10]\noutput_encoding = 'UTF-8'\n\noutput = ['Hello', 'World']\n```", + "inputs": [ + { + "description": "A 1D tensor containing the unicode codepoints that should be encoded.", + "name": "input_values", + "type": 3 + }, + { + "description": "A 1D tensor specifying how the unicode codepoints should be split into strings.\nIn particular, `output[i]` is constructed by encoding the codepoints in the\nslice `input_values[input_splits[i]:input_splits[i+1]]`.", + "name": "input_splits", + "typeAttr": "Tsplits" + } + ], + "outputs": [ + { + "description": "The 1-D Tensor of strings encoded from the provided unicode codepoints.", + "name": "output", + "type": 7 + } + ], + "summary": "Encode a tensor of ints into unicode strings." + } + }, + { + "name": "UnicodeScript", + "schema": { + "description": "This operation converts Unicode code points to script codes corresponding to\neach code point. 
Script codes correspond to International Components for\nUnicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html.\nReturns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will\nmatch input shape.\n\nExamples:\n\n>>> tf.strings.unicode_script([1, 31, 38])\n", + "inputs": [ + { + "description": "A Tensor of int32 Unicode code points.", + "name": "input", + "type": 3 + } + ], + "outputs": [ + { + "description": "A Tensor of int32 script codes corresponding to each input code point.", + "name": "output", + "type": 3 + } + ], + "summary": "Determine the script codes of a given tensor of Unicode integer code points." + } + }, + { + "name": "UnicodeTranscode", + "schema": { + "attributes": [ + { + "description": "Text encoding of the input strings. This is any of the encodings supported\nby ICU ucnv algorithmic converters. Examples: `\"UTF-16\", \"US ASCII\", \"UTF-8\"`.", + "name": "input_encoding", + "type": "string" + }, + { + "description": "The unicode encoding to use in the output. Must be one of\n`\"UTF-8\", \"UTF-16-BE\", \"UTF-32-BE\"`. Multi-byte encodings will be big-endian. Must be one of the following: `UTF-8`, `UTF-16-BE`, `UTF-32-BE`.", + "name": "output_encoding", + "type": "string" + }, + { + "default": "replace", + "description": "Error handling policy when there is invalid formatting found in the input.\nThe value of 'strict' will cause the operation to produce a InvalidArgument\nerror on any invalid input formatting. A value of 'replace' (the default) will\ncause the operation to replace any invalid formatting in the input with the\n`replacement_char` codepoint. A value of 'ignore' will cause the operation to\nskip any invalid formatting in the input and produce no corresponding output\ncharacter. 
Must be one of the following: `strict`, `replace`, `ignore`.", + "name": "errors", + "type": "string" + }, + { + "default": 65533, + "description": "The replacement character codepoint to be used in place of any invalid\nformatting in the input when `errors='replace'`. Any valid unicode codepoint may\nbe used. The default value is the default unicode replacement character is\n0xFFFD or U+65533.)\n\nNote that for UTF-8, passing a replacement character expressible in 1 byte, such\nas ' ', will preserve string alignment to the source since invalid bytes will be\nreplaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte\nreplacement character will preserve byte alignment to the source.", + "name": "replacement_char", + "type": "int64" + }, + { + "default": false, + "description": "Whether to replace the C0 control characters (00-1F) with the\n`replacement_char`. Default is false.", + "name": "replace_control_characters", + "type": "boolean" + } + ], + "description": "The input is a string tensor of any shape. The output is a string tensor of\nthe same shape containing the transcoded strings. Output strings are always\nvalid unicode. If the input contains invalid encoding positions, the\n`errors` attribute sets the policy for how to deal with them. If the default\nerror-handling policy is used, invalid formatting will be substituted in the\noutput by the `replacement_char`. If the errors policy is to `ignore`, any\ninvalid encoding positions in the input are skipped and not included in the\noutput. If it set to `strict` then any invalid formatting will result in an\nInvalidArgument error.\n\nThis operation can be used with `output_encoding = input_encoding` to enforce\ncorrect formatting for inputs even if they are already in the desired encoding.\n\nIf the input is prefixed by a Byte Order Mark needed to determine encoding\n(e.g. 
if the encoding is UTF-16 and the BOM indicates big-endian), then that\nBOM will be consumed and not emitted into the output. If the input encoding\nis marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is\ninterpreted as a non-breaking-space and is preserved in the output (including\nalways for UTF-8).\n\nThe end result is that if the input is marked as an explicit endianness the\ntranscoding is faithful to all codepoints in the source. If it is not marked\nwith an explicit endianness, the BOM is not considered part of the string itself\nbut as metadata, and so is not preserved in the output.\n\nExamples:\n\n>>> tf.strings.unicode_transcode([\"Hello\", \"TensorFlow\", \"2.x\"], \"UTF-8\", \"UTF-16-BE\")\n\n>>> tf.strings.unicode_transcode([\"A\", \"B\", \"C\"], \"US ASCII\", \"UTF-8\").numpy()\narray([b'A', b'B', b'C'], dtype=object)", + "inputs": [ + { + "description": "The text to be processed. Can have any shape.", + "name": "input", + "type": 7 + } + ], + "outputs": [ + { + "description": "A string tensor containing unicode text encoded using `output_encoding`.", + "name": "output", + "type": 7 + } + ], + "summary": "Transcode the input text from a source encoding to a destination encoding." + } + }, + { + "name": "UniformCandidateSampler", + "schema": { + "attributes": [ + { + "description": "Number of true labels per context.", + "minimum": 1, + "name": "num_true", + "type": "int64" + }, + { + "description": "Number of candidates to randomly sample.", + "minimum": 1, + "name": "num_sampled", + "type": "int64" + }, + { + "description": "If unique is true, we sample with rejection, so that all sampled\ncandidates in a batch are unique. 
This requires some approximation to\nestimate the post-rejection sampling probabilities.", + "name": "unique", + "type": "boolean" + }, + { + "description": "The sampler will sample integers from the interval [0, range_max).", + "minimum": 1, + "name": "range_max", + "type": "int64" + }, + { + "default": 0, + "description": "If either seed or seed2 are set to be non-zero, the random number\ngenerator is seeded by the given seed. Otherwise, it is seeded by a\nrandom seed.", + "name": "seed", + "type": "int64" + }, + { + "default": 0, + "description": "An second seed to avoid seed collision.", + "name": "seed2", + "type": "int64" + } + ], + "description": "See explanations of candidate sampling and the data formats at\ngo/candidate-sampling.\n\nFor each batch, this op picks a single set of sampled candidate labels.\n\nThe advantages of sampling candidates per-batch are simplicity and the\npossibility of efficient dense matrix multiplication. The disadvantage is that\nthe sampled candidates must be chosen independently of the context and of the\ntrue labels.", + "inputs": [ + { + "description": "A batch_size * num_true matrix, in which each row contains the\nIDs of the num_true target_classes in the corresponding original label.", + "name": "true_classes", + "type": 9 + } + ], + "outputs": [ + { + "description": "A vector of length num_sampled, in which each element is\nthe ID of a sampled candidate.", + "name": "sampled_candidates", + "type": 9 + }, + { + "description": "A batch_size * num_true matrix, representing\nthe number of times each candidate is expected to occur in a batch\nof sampled candidates. If unique=true, then this is a probability.", + "name": "true_expected_count", + "type": 1 + }, + { + "description": "A vector of length num_sampled, for each sampled\ncandidate representing the number of times the candidate is expected\nto occur in a batch of sampled candidates. 
If unique=true, then this is a\nprobability.", + "name": "sampled_expected_count", + "type": 1 + } + ], + "summary": "Generates labels for candidate sampling with a uniform distribution." + } + }, + { + "name": "Unique", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_idx", + "type": "type" + } + ], + "description": "This operation returns a tensor `y` containing all of the unique elements of `x`\nsorted in the same order that they occur in `x`; `x` does not need to be sorted.\nThis operation also returns a tensor `idx` the same size as `x` that contains\nthe index of each value of `x` in the unique output `y`. In other words:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nExamples:\n\n```\n# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx = unique(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n```\n\n```\n# tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]\ny, idx = unique(x)\ny ==> [4, 5, 1, 2, 3]\nidx ==> [0, 1, 2, 3, 4, 4, 0, 1]\n```", + "inputs": [ + { + "description": "1-D.", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "1-D.", + "name": "idx", + "typeAttr": "out_idx" + } + ], + "summary": "Finds unique elements in a 1-D tensor." + } + }, + { + "name": "UniqueDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that contains the unique elements of `input_dataset`." 
+ } + }, + { + "name": "UniqueV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Taxis", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_idx", + "type": "type" + } + ], + "description": "This operation either returns a tensor `y` containing unique elements\nalong the `axis` of a tensor. The returned unique elements is sorted\nin the same order as they occur along `axis` in `x`.\nThis operation also returns a tensor `idx` that is the same size as\nthe number of the elements in `x` along the `axis` dimension. It\ncontains the index in the unique output `y`.\nIn other words, for an `1-D` tensor `x` with `axis = None:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\n# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx = unique(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\n```\n\nFor an `2-D` tensor `x` with `axis = 0`:\n\n```\n# tensor 'x' is [[1, 0, 0],\n# [1, 0, 0],\n# [2, 0, 0]]\ny, idx = unique(x, axis=0)\ny ==> [[1, 0, 0],\n [2, 0, 0]]\nidx ==> [0, 0, 1]\n```\n\nFor an `2-D` tensor `x` with `axis = 1`:\n\n```\n# tensor 'x' is [[1, 0, 0],\n# [1, 0, 0],\n# [2, 0, 0]]\ny, idx = unique(x, axis=1)\ny ==> [[1, 0],\n [1, 0],\n [2, 0]]\nidx ==> [0, 1, 1]\n```", + "inputs": [ + { + "description": "A `Tensor`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A `Tensor` of type `int32` (default: None). The axis of the Tensor to\nfind the unique elements.", + "name": "axis", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "description": "A `Tensor`. Unique elements along the `axis` of `Tensor` x.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "A 1-D Tensor. 
Has the same type as x that contains the index of each\nvalue of x in the output y.", + "name": "idx", + "typeAttr": "out_idx" + } + ], + "summary": "Finds unique elements along an axis of a tensor." + } + }, + { + "name": "UniqueWithCounts", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_idx", + "type": "type" + } + ], + "description": "This operation returns a tensor `y` containing all of the unique elements of `x`\nsorted in the same order that they occur in `x`. This operation also returns a\ntensor `idx` the same size as `x` that contains the index of each value of `x`\nin the unique output `y`. Finally, it returns a third tensor `count` that\ncontains the count of each element of `y` in `x`. In other words:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\n# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx, count = unique_with_counts(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\ncount ==> [2, 1, 3, 1, 2]\n```", + "inputs": [ + { + "description": "1-D.", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "1-D.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "1-D.", + "name": "idx", + "typeAttr": "out_idx" + }, + { + "description": "1-D.", + "name": "count", + "typeAttr": "out_idx" + } + ], + "summary": "Finds unique elements in a 1-D tensor." 
+ } + }, + { + "name": "UniqueWithCountsV2", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 9 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Taxis", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_idx", + "type": "type" + } + ], + "description": "This operation either returns a tensor `y` containing unique elements\nalong the `axis` of a tensor. The returned unique elements is sorted\nin the same order as they occur along `axis` in `x`.\nThis operation also returns a tensor `idx` and a tensor `count`\nthat are the same size as the number of the elements in `x` along the\n`axis` dimension. The `idx` contains the index in the unique output `y`\nand the `count` contains the count in the unique output `y`.\nIn other words, for an `1-D` tensor `x` with `axis = None:\n\n`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`\n\nFor example:\n\n```\n# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]\ny, idx, count = unique_with_counts(x)\ny ==> [1, 2, 4, 7, 8]\nidx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]\ncount ==> [2, 1, 3, 1, 2]\n```\n\nFor an `2-D` tensor `x` with `axis = 0`:\n\n```\n# tensor 'x' is [[1, 0, 0],\n# [1, 0, 0],\n# [2, 0, 0]]\ny, idx, count = unique_with_counts(x, axis=0)\ny ==> [[1, 0, 0],\n [2, 0, 0]]\nidx ==> [0, 0, 1]\ncount ==> [2, 1]\n```\n\nFor an `2-D` tensor `x` with `axis = 1`:\n\n```\n# tensor 'x' is [[1, 0, 0],\n# [1, 0, 0],\n# [2, 0, 0]]\ny, idx, count = unique_with_counts(x, axis=1)\ny ==> [[1, 0],\n [1, 0],\n [2, 0]]\nidx ==> [0, 1, 1]\ncount ==> [1, 2]\n```", + "inputs": [ + { + "description": "A `Tensor`.", + "name": "x", + "typeAttr": "T" + }, + { + "description": "A `Tensor` of type `int32` (default: None). 
The axis of the Tensor to\nfind the unique elements.", + "name": "axis", + "typeAttr": "Taxis" + } + ], + "outputs": [ + { + "description": "A `Tensor`. Unique elements along the `axis` of `Tensor` x.", + "name": "y", + "typeAttr": "T" + }, + { + "description": "A 1-D Tensor. Has the same type as x that contains the index of each\nvalue of x in the output y.", + "name": "idx", + "typeAttr": "out_idx" + }, + { + "description": "A 1-D Tensor. The count of each value of x in the output y.", + "name": "count", + "typeAttr": "out_idx" + } + ], + "summary": "Finds unique elements along an axis of a tensor." + } + }, + { + "name": "Unpack", + "schema": { + "attributes": [ + { + "minimum": 0, + "name": "num", + "type": "int64" + }, + { + "name": "T", + "type": "type" + }, + { + "default": 0, + "description": "Dimension along which to unpack. Negative values wrap around, so the\nvalid range is `[-R, R)`.", + "name": "axis", + "type": "int64" + } + ], + "description": "Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.\nFor example, given a tensor of shape `(A, B, C, D)`;\n\nIf `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`\n and each tensor in `output` will have shape `(B, C, D)`. (Note that the\n dimension unpacked along is gone, unlike `split`).\n\nIf `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`\n and each tensor in `output` will have shape `(A, C, D)`.\nEtc.\n\nThis is the opposite of `pack`.", + "inputs": [ + { + "description": "1-D or higher, with `axis` dimension size equal to `num`.", + "name": "value", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "The list of tensors unpacked from `value`.", + "name": "output", + "numberAttr": "num", + "typeAttr": "T" + } + ], + "summary": "Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors." 
+ } + }, + { + "name": "UnravelIndex", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tidx", + "type": "type" + } + ], + "description": "\nExample:\n\n```\ny = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])\n# 'dims' represent a hypothetical (3, 3) tensor of indices:\n# [[0, 1, *2*],\n# [3, 4, *5*],\n# [6, *7*, 8]]\n# For each entry from 'indices', this operation returns\n# its coordinates (marked with '*'), such as\n# 2 ==> (0, 2)\n# 5 ==> (1, 2)\n# 7 ==> (2, 1)\ny ==> [[0, 1, 2], [2, 2, 1]]\n```\n\n@compatibility(numpy)\nEquivalent to np.unravel_index\n@end_compatibility", + "inputs": [ + { + "description": "An 0-D or 1-D `int` Tensor whose elements are indices into the\nflattened version of an array of dimensions dims.", + "name": "indices", + "typeAttr": "Tidx" + }, + { + "description": "An 1-D `int` Tensor. The shape of the array to use for unraveling\nindices.", + "name": "dims", + "typeAttr": "Tidx" + } + ], + "outputs": [ + { + "description": "An 2-D (or 1-D if indices is 0-D) tensor where each row has the\nsame shape as the indices array.", + "name": "output", + "typeAttr": "Tidx" + } + ], + "summary": "Converts an array of flat indices into a tuple of coordinate arrays." 
+ } + }, + { + "name": "UnsortedSegmentJoin", + "schema": { + "attributes": [ + { + "default": "", + "description": "The separator to use when joining.", + "name": "separator", + "type": "string" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + } + ], + "description": "Computes the string join along segments of a tensor.\nGiven `segment_ids` with rank `N` and `data` with rank `N+M`:\n\n `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])`\n\nwhere the join is over all [j1...jN] such that segment_ids[j1...jN] = i.\nStrings are joined in row-major order.\n\nFor example:\n\n```python\ninputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]\noutput_array = string_ops.unsorted_segment_join(inputs=inputs,\n segment_ids=[1, 0, 1],\n num_segments=2,\n separator=':'))\n# output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]\n\n\ninputs = ['this', 'is', 'a', 'test']\noutput_array = string_ops.unsorted_segment_join(inputs=inputs,\n segment_ids=[0, 0, 0, 0],\n num_segments=1,\n separator=':'))\n# output_array ==> ['this:is:a:test']\n```", + "inputs": [ + { + "description": "The input to be joined.", + "name": "inputs", + "type": 7 + }, + { + "description": "A tensor whose shape is a prefix of data.shape. Negative segment ids are not\nsupported.", + "name": "segment_ids", + "typeAttr": "Tindices" + }, + { + "description": "A scalar.", + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "name": "output", + "type": 7 + } + ], + "summary": "Joins the elements of `inputs` based on `segment_ids`." 
+ } + }, + { + "name": "UnsortedSegmentMax", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nThis operator is similar to the unsorted segment sum operator found\n[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\nInstead of computing the sum over segments, it computes the maximum such that:\n\n\\\\(output_i = \\max_{j...} data[j...]\\\\) where max is over tuples `j...` such\nthat `segment_ids[j...] == i`.\n\nIf the maximum is empty for a given segment ID `i`, it outputs the smallest\npossible value for the specific numeric type,\n`output[i] = numeric_limits::lowest()`.\n\nIf the given segment ID `i` is negative, then the corresponding value is\ndropped, and will not be included in the result.\n\n
    \n\n
    \n\nFor example:\n\n``` python\nc = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])\ntf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)\n# ==> [[ 4, 3, 3, 4],\n# [5, 6, 7, 8]]\n```\n", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A tensor whose shape is a prefix of `data.shape`.", + "name": "segment_ids", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the maximum along segments of a tensor." + } + }, + { + "name": "UnsortedSegmentMin", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nThis operator is similar to the unsorted segment sum operator found\n[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\nInstead of computing the sum over segments, it computes the minimum such that:\n\n\\\\(output_i = \\min_{j...} data_[j...]\\\\) where min is over tuples `j...` such\nthat `segment_ids[j...] 
== i`.\n\nIf the minimum is empty for a given segment ID `i`, it outputs the largest\npossible value for the specific numeric type,\n`output[i] = numeric_limits::max()`.\n\nFor example:\n\n``` python\nc = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])\ntf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)\n# ==> [[ 1, 2, 2, 1],\n# [5, 6, 7, 8]]\n```\n\nIf the given segment ID `i` is negative, then the corresponding value is\ndropped, and will not be included in the result.", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A tensor whose shape is a prefix of `data.shape`.", + "name": "segment_ids", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the minimum along segments of a tensor." 
+ } + }, + { + "name": "UnsortedSegmentProd", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nThis operator is similar to the unsorted segment sum operator found\n[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).\nInstead of computing the sum over segments, it computes the product of all\nentries belonging to a segment such that:\n\n\\\\(output_i = \\prod_{j...} data[j...]\\\\) where the product is over tuples\n`j...` such that `segment_ids[j...] 
== i`.\n\nFor example:\n\n``` python\nc = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])\ntf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)\n# ==> [[ 4, 6, 6, 4],\n# [5, 6, 7, 8]]\n```\n\nIf there is no entry for a given segment ID `i`, it outputs 1.\n\nIf the given segment ID `i` is negative, then the corresponding value is\ndropped, and will not be included in the result.", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A tensor whose shape is a prefix of `data.shape`.", + "name": "segment_ids", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the product along segments of a tensor." + } + }, + { + "name": "UnsortedSegmentSum", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + }, + { + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tindices", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "Tnumsegments", + "type": "type" + } + ], + "description": "Read\n[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)\nfor an explanation of segments.\n\nComputes a tensor such that\n\\\\(output[i] = \\sum_{j...} data[j...]\\\\) where the sum is over tuples `j...` such\nthat `segment_ids[j...] == i`. 
Unlike `SegmentSum`, `segment_ids`\nneed not be sorted and need not cover all values in the full\nrange of valid values.\n\nIf the sum is empty for a given segment ID `i`, `output[i] = 0`.\nIf the given segment ID `i` is negative, the value is dropped and will not be\nadded to the sum of the segment.\n\n`num_segments` should equal the number of distinct segment IDs.\n\n
    \n\n
    \n\n``` python\nc = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])\ntf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)\n# ==> [[ 5, 5, 5, 5],\n# [5, 6, 7, 8]]\n```\n", + "inputs": [ + { + "name": "data", + "typeAttr": "T" + }, + { + "description": "A tensor whose shape is a prefix of `data.shape`.", + "name": "segment_ids", + "typeAttr": "Tindices" + }, + { + "name": "num_segments", + "typeAttr": "Tnumsegments" + } + ], + "outputs": [ + { + "description": "Has same shape as data, except for the first `segment_ids.rank`\ndimensions, which are replaced with a single dimension which has size\n`num_segments`.", + "name": "output", + "typeAttr": "T" + } + ], + "summary": "Computes the sum along segments of a tensor." + } + }, + { + "name": "Unstage", + "schema": { + "attributes": [ + { + "default": 0, + "minimum": 0, + "name": "capacity", + "type": "int64" + }, + { + "default": 0, + "minimum": 0, + "name": "memory_limit", + "type": "int64" + }, + { + "minimum": 1, + "name": "dtypes", + "type": "type[]" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "description": "The basic functionality is similar to dequeue with many fewer\ncapabilities and options. This Op is optimized for performance.", + "outputs": [ + { + "name": "values", + "typeListAttr": "dtypes" + } + ], + "summary": "Op is similar to a lightweight Dequeue." 
+ } + }, + { + "name": "UnwrapDatasetVariant", + "schema": { + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + } + }, + { + "name": "UpperBound", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + }, + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_type", + "type": "type" + } + ], + "description": "Each set of rows with the same index in (sorted_inputs, values) is treated\nindependently. The resulting row is the equivalent of calling\n`np.searchsorted(sorted_inputs, values, side='right')`.\n\nThe result is not a global index to the entire\n`Tensor`, but rather just the index in the last dimension.\n\nA 2-D example:\n sorted_sequence = [[0, 3, 9, 9, 10],\n [1, 2, 3, 4, 5]]\n values = [[2, 4, 9],\n [0, 2, 6]]\n\n result = UpperBound(sorted_sequence, values)\n\n result == [[1, 2, 4],\n [0, 2, 5]]", + "inputs": [ + { + "description": "2-D Tensor where each row is ordered.", + "name": "sorted_inputs", + "typeAttr": "T" + }, + { + "description": "2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains\nthe values that will be searched for in `sorted_search_values`.", + "name": "values", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "A `Tensor` with the same shape as `values`. It contains the last scalar index\ninto the last dimension where values can be inserted without changing the\nordered property.", + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Applies upper_bound(sorted_search_values, values) along each row." 
+ } + }, + { + "name": "VarHandleOp", + "schema": { + "attributes": [ + { + "default": "", + "description": "the container this variable is placed in.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "the name by which this variable is referred to.", + "name": "shared_name", + "type": "string" + }, + { + "description": "the type of this variable. Must agree with the dtypes\nof all ops using this variable.", + "name": "dtype", + "type": "type" + }, + { + "description": "The (possibly partially specified) shape of this variable.", + "name": "shape", + "type": "shape" + }, + { + "default": [], + "description": "The allowed devices containing the resource variable. Set when the output\nResourceHandle represents a per-replica/partitioned resource variable.", + "name": "allowed_devices", + "type": "string[]" + } + ], + "outputs": [ + { + "name": "resource", + "type": 20 + } + ], + "summary": "Creates a handle to a Variable resource." + } + }, + { + "name": "VarIsInitializedOp", + "schema": { + "inputs": [ + { + "description": "the input resource handle.", + "name": "resource", + "type": 20 + } + ], + "outputs": [ + { + "description": "a scalar boolean which is true if the variable has been\ninitialized.", + "name": "is_initialized", + "type": 10 + } + ], + "summary": "Checks whether a resource handle-based variable has been initialized." + } + }, + { + "name": "Variable", + "schema": { + "attributes": [ + { + "name": "shape", + "type": "shape" + }, + { + "name": "dtype", + "type": "type" + }, + { + "default": "", + "name": "container", + "type": "string" + }, + { + "default": "", + "name": "shared_name", + "type": "string" + } + ], + "category": "Control", + "outputs": [ + { + "isRef": true, + "name": "ref", + "typeAttr": "dtype" + } + ], + "summary": "Use VariableV2 instead." 
+ } + }, + { + "name": "VariableShape", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 3 + }, + "description": "Must be one of the following: `int32`, `int64`.", + "name": "out_type", + "type": "type" + } + ], + "description": "This operation returns a 1-D integer tensor representing the shape of `input`.\n\nFor example:\n\n```\n# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]\nshape(t) ==> [2, 2, 3]\n```", + "inputs": [ + { + "name": "input", + "type": 20 + } + ], + "outputs": [ + { + "name": "output", + "typeAttr": "out_type" + } + ], + "summary": "Returns the shape of the variable pointed to by `resource`." + } + }, + { + "name": "VariableV2", + "schema": { + "attributes": [ + { + "description": "The shape of the variable tensor.", + "name": "shape", + "type": "shape" + }, + { + "description": "The type of elements in the variable tensor.", + "name": "dtype", + "type": "type" + }, + { + "default": "", + "description": "If non-empty, this variable is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this variable is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "category": "Control", + "description": "Outputs a ref to the tensor state so it may be read or modified.\nTODO(zhifengc/mrry): Adds a pointer to a more detail document\nabout sharing states in tensorflow.", + "outputs": [ + { + "description": "A reference to the variable tensor.", + "isRef": true, + "name": "ref", + "typeAttr": "dtype" + } + ], + "summary": "Holds state in the form of a tensor that persists across steps." 
+ } + }, + { + "name": "Where", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 10 + }, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `float16`, `uint32`, `uint64`, `bool`.", + "name": "T", + "type": "type" + } + ], + "description": "This operation returns the coordinates of true elements in `input`. The\ncoordinates are returned in a 2-D tensor where the first dimension (rows)\nrepresents the number of true elements, and the second dimension (columns)\nrepresents the coordinates of the true elements. Keep in mind, the shape of\nthe output tensor can vary depending on how many true values there are in\n`input`. Indices are output in row-major order.\n\nFor example:\n\n```\n# 'input' tensor is [[True, False]\n# [True, False]]\n# 'input' has two true values, so output has two coordinates.\n# 'input' has rank of 2, so coordinates have two indices.\nwhere(input) ==> [[0, 0],\n [1, 0]]\n\n# `input` tensor is [[[True, False]\n# [True, False]]\n# [[False, True]\n# [False, True]]\n# [[False, False]\n# [False, True]]]\n# 'input' has 5 true values, so output has 5 coordinates.\n# 'input' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 1],\n [2, 1, 1]]\n\n# `input` tensor is [[[1.5, 0.0]\n# [-0.5, 0.0]]\n# [[0.0, 0.25]\n# [0.0, 0.75]]\n# [[0.0, 0.0]\n# [0.0, 0.01]]]\n# 'input' has 5 nonzero values, so output has 5 coordinates.\n# 'input' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 1],\n [2, 1, 1]]\n\n# `input` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]\n# [0.0 + 0.5j, 0.0 + 0.0j]]\n# [[0.0 + 0.0j, 0.25 + 1.5j]\n# [0.0 + 0.0j, 0.75 + 0.0j]]\n# [[0.0 + 0.0j, 0.0 + 0.0j]\n# [0.0 + 0.0j, 0.01 + 0.0j]]]\n# 'input' has 5 nonzero magnitude values, so output has 5 coordinates.\n# 
'input' has rank of 3, so coordinates have three indices.\nwhere(input) ==> [[0, 0, 0],\n [0, 1, 0],\n [1, 0, 1],\n [1, 1, 1],\n [2, 1, 1]]\n```", + "inputs": [ + { + "name": "input", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "index", + "type": 9 + } + ], + "summary": "Returns locations of nonzero / true values in a tensor." + } + }, + { + "name": "While", + "schema": { + "attributes": [ + { + "description": "dtype in use.", + "minimum": 0, + "name": "T", + "type": "type[]" + }, + { + "description": " A function takes 'input' and returns a tensor. If the tensor is\n a scalar of non-boolean, the scalar is converted to a boolean\n according to the following rule: if the scalar is a numerical\n value, non-zero means True and zero means False; if the scalar is\n a string, non-empty means True and empty means False. If the\n tensor is not a scalar, non-emptiness means True and False\n otherwise.", + "name": "cond", + "type": "function" + }, + { + "description": " A function that takes a list of tensors and returns another\n list of tensors. 
Both lists have the same types as specified\n by T.", + "name": "body", + "type": "function" + }, + { + "default": [], + "name": "output_shapes", + "type": "shape[]" + }, + { + "default": 10, + "name": "parallel_iterations", + "type": "int64" + } + ], + "inputs": [ + { + "description": "A list of input tensors whose types are T.", + "name": "input", + "typeListAttr": "T" + } + ], + "outputs": [ + { + "description": "A list of output tensors whose types are T.", + "name": "output", + "typeListAttr": "T" + } + ], + "summary": "output = input; While (Cond(output)) { output = Body(output) }" + } + }, + { + "name": "WholeFileReader", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "description": "To use, enqueue filenames in a Queue. The output of ReaderRead will\nbe a filename (key) and the contents of that file (value).", + "outputs": [ + { + "description": "The handle to reference the Reader.", + "isRef": true, + "name": "reader_handle", + "type": 7 + } + ], + "summary": "A Reader that outputs the entire contents of a file as a value." + } + }, + { + "name": "WholeFileReaderV2", + "schema": { + "attributes": [ + { + "default": "", + "description": "If non-empty, this reader is placed in the given container.\nOtherwise, a default container is used.", + "name": "container", + "type": "string" + }, + { + "default": "", + "description": "If non-empty, this reader is named in the given bucket\nwith this shared_name. Otherwise, the node name is used instead.", + "name": "shared_name", + "type": "string" + } + ], + "description": "To use, enqueue filenames in a Queue. 
The output of ReaderRead will\nbe a filename (key) and the contents of that file (value).", + "outputs": [ + { + "description": "The handle to reference the Reader.", + "name": "reader_handle", + "type": 20 + } + ], + "summary": "A Reader that outputs the entire contents of a file as a value." + } + }, + { + "name": "WindowDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + } + ], + "inputs": [ + { + "name": "input_dataset", + "type": 21 + }, + { + "description": "An integer scalar, representing the number of elements\nof the input dataset to combine into a window. Must be positive.", + "name": "size", + "type": 9 + }, + { + "description": "An integer scalar, representing the number of input elements\nby which the window moves in each iteration. Defaults to `size`.\nMust be positive.", + "name": "shift", + "type": 9 + }, + { + "description": "An integer scalar, representing the stride of the input elements\nin the sliding window. Must be positive. The default value of 1 means\n\"retain every input element\".", + "name": "stride", + "type": 9 + }, + { + "description": "A Boolean scalar, representing whether the last window should be\ndropped if its size is smaller than `window_size`.", + "name": "drop_remainder", + "type": 10 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": " Combines (nests of) input elements into a dataset of (nests of) windows.\n\n A \"window\" is a finite dataset of flat elements of size `size` (or possibly\n fewer if there are not enough input elements to fill the window and\n `drop_remainder` evaluates to false).\n\n The `shift` argument determines the number of input elements by which\n the window moves on each iteration. The first element in the `k`th window\n will be element\n\n ```\n 1 + (k-1) * shift\n ```\n\n of the input dataset. 
In particular, the first element of the first window\n will always be the first element of the input dataset. \n\n If the `stride` parameter is greater than 1, then each window will skip\n `(stride - 1)` input elements between each element that appears in the\n window. Output windows will still contain `size` elements regardless of\n the value of `stride`.\n\n The `stride` argument determines the stride of the input elements, and the\n `shift` argument determines the shift of the window.\n\n For example, letting `{...}` to represent a Dataset:\n\n - `tf.data.Dataset.range(7).window(2)` produces\n `{{0, 1}, {2, 3}, {4, 5}, {6}}`\n - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces\n `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`\n - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces\n `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`\n\n Note that when the `window` transformation is applied to a dataset of\n nested elements, it produces a dataset of nested windows.\n\n For example:\n\n - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`\n produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`\n - `tf.data.Dataset.from_tensor_slices({\"a\": range(4)}).window(2)`\n produces `{{\"a\": {0, 1}}, {\"a\": {2, 3}}}`" + } + }, + { + "name": "WorkerHeartbeat", + "schema": { + "description": "Heartbeats may be sent periodically to indicate the coordinator is still active,\nto retrieve the current worker status and to expedite shutdown when necessary.", + "inputs": [ + { + "description": "A string tensor containing a serialized WorkerHeartbeatRequest", + "name": "request", + "type": 7 + } + ], + "outputs": [ + { + "description": "A string tensor containing a serialized WorkerHeartbeatResponse", + "name": "response", + "type": 7 + } + ], + "summary": "Worker heartbeat op." 
+ } + }, + { + "name": "WrapDatasetVariant", + "schema": { + "inputs": [ + { + "name": "input_handle", + "type": 21 + } + ], + "outputs": [ + { + "name": "output_handle", + "type": 21 + } + ] + } + }, + { + "name": "WriteAudioSummary", + "schema": { + "attributes": [ + { + "default": 3, + "minimum": 1, + "name": "max_outputs", + "type": "int64" + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "tensor", + "type": 1 + }, + { + "name": "sample_rate", + "type": 1 + } + ] + } + }, + { + "name": "WriteFile", + "schema": { + "description": "creates directory if not existing.", + "inputs": [ + { + "description": "scalar. The name of the file to which we write the contents.", + "name": "filename", + "type": 7 + }, + { + "description": "scalar. The content to be written to the output file.", + "name": "contents", + "type": 7 + } + ], + "summary": "Writes contents to the file at input filename. 
Creates file and recursively" + } + }, + { + "name": "WriteGraphSummary", + "schema": { + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tensor", + "type": 7 + } + ] + } + }, + { + "name": "WriteHistogramSummary", + "schema": { + "attributes": [ + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "values", + "typeAttr": "T" + } + ] + } + }, + { + "name": "WriteImageSummary", + "schema": { + "attributes": [ + { + "default": 3, + "minimum": 1, + "name": "max_images", + "type": "int64" + }, + { + "default": { + "type": "type", + "value": 1 + }, + "description": "Must be one of the following: `uint8`, `float32`, `float16`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "bad_color", + "type": 4 + } + ] + } + }, + { + "name": "WriteRawProtoSummary", + "schema": { + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tensor", + "type": 7 + } + ] + } + }, + { + "name": "WriteScalarSummary", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `float16`, `uint32`, `uint64`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "value", + "typeAttr": "T" + } + ] + } + }, 
+ { + "name": "WriteSummary", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "writer", + "type": 20 + }, + { + "name": "step", + "type": 9 + }, + { + "name": "tensor", + "typeAttr": "T" + }, + { + "name": "tag", + "type": 7 + }, + { + "name": "summary_metadata", + "type": 7 + } + ] + } + }, + { + "name": "Xdivy", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns 0 if x == 0, and x / y otherwise, elementwise." + } + }, + { + "name": "Xlog1py", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise." + } + }, + { + "name": "Xlogy", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float16`, `float32`, `float64`, `complex64`, `complex128`.", + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "y", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Returns 0 if x == 0, and x * log(y) otherwise, elementwise." 
+ } + }, + { + "name": "ZerosLike", + "schema": { + "attributes": [ + { + "name": "T", + "type": "type" + } + ], + "inputs": [ + { + "description": "a tensor of type T.", + "name": "x", + "typeAttr": "T" + } + ], + "outputs": [ + { + "description": "a tensor of the same shape and type as x but filled with zeros.", + "name": "y", + "typeAttr": "T" + } + ], + "summary": "Returns a tensor of zeros with the same shape and type as x." + } + }, + { + "name": "Zeta", + "schema": { + "attributes": [ + { + "description": "Must be one of the following: `float32`, `float64`.", + "name": "T", + "type": "type" + } + ], + "description": "The Hurwitz zeta function is defined as:\n\n\n\\\\(\\zeta(x, q) = \\sum_{n=0}^{\\infty} (q + n)^{-x}\\\\)", + "inputs": [ + { + "name": "x", + "typeAttr": "T" + }, + { + "name": "q", + "typeAttr": "T" + } + ], + "outputs": [ + { + "name": "z", + "typeAttr": "T" + } + ], + "summary": "Compute the Hurwitz zeta function \\\\(\\zeta(x, q)\\\\)." + } + }, + { + "name": "ZipDataset", + "schema": { + "attributes": [ + { + "minimum": 1, + "name": "output_types", + "type": "type[]" + }, + { + "minimum": 1, + "name": "output_shapes", + "type": "shape[]" + }, + { + "description": "The length of `input_datasets`", + "minimum": 1, + "name": "N", + "type": "int64" + } + ], + "description": "The elements of the resulting dataset are created by zipping corresponding\nelements from each of the input datasets.\n\nThe size of the resulting dataset will match the size of the smallest input\ndataset, and no error will be raised if input datasets have different sizes.", + "inputs": [ + { + "description": "List of `N` variant Tensors representing datasets to be zipped together.", + "name": "input_datasets", + "numberAttr": "N", + "type": 21 + } + ], + "outputs": [ + { + "name": "handle", + "type": 21 + } + ], + "summary": "Creates a dataset that zips together `input_datasets`." 
+ } + } +] diff --git a/frontend/packages/core/public/netron/tf-proto.js b/frontend/packages/core/public/netron/tf-proto.js new file mode 100644 index 00000000..c8d5df3c --- /dev/null +++ b/frontend/packages/core/public/netron/tf-proto.js @@ -0,0 +1,6153 @@ +/*eslint-disable block-scoped-var, id-length, no-control-regex, no-magic-numbers, no-prototype-builtins, no-redeclare, no-shadow, no-var, sort-vars*/ +(function($protobuf) { + "use strict"; + + var $Reader = $protobuf.Reader, $util = $protobuf.util; + + var $root = $protobuf.roots.tf || ($protobuf.roots.tf = {}); + + $root.tensorflow = (function() { + + var tensorflow = {}; + + tensorflow.SavedModel = (function() { + + function SavedModel(properties) { + this.meta_graphs = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedModel.prototype.saved_model_schema_version = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + SavedModel.prototype.meta_graphs = $util.emptyArray; + + SavedModel.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedModel(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.saved_model_schema_version = reader.int64(); + break; + case 2: + if (!(message.meta_graphs && message.meta_graphs.length)) + message.meta_graphs = []; + message.meta_graphs.push($root.tensorflow.MetaGraphDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedModel.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedModel(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "saved_model_schema_version": + message.saved_model_schema_version = reader.int64(); + break; + case "meta_graphs": + if (!(message.meta_graphs && message.meta_graphs.length)) + message.meta_graphs = []; + message.meta_graphs.push($root.tensorflow.MetaGraphDef.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedModel; + })(); + + tensorflow.MetaGraphDef = (function() { + + function MetaGraphDef(properties) { + this.collection_def = {}; + this.signature_def = {}; + this.asset_file_def = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MetaGraphDef.prototype.meta_info_def = null; + MetaGraphDef.prototype.graph_def = null; + MetaGraphDef.prototype.saver_def = null; + MetaGraphDef.prototype.collection_def = $util.emptyObject; + MetaGraphDef.prototype.signature_def = $util.emptyObject; + MetaGraphDef.prototype.asset_file_def = $util.emptyArray; + MetaGraphDef.prototype.object_graph_def = null; + + MetaGraphDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.MetaGraphDef(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.meta_info_def = $root.tensorflow.MetaGraphDef.MetaInfoDef.decode(reader, reader.uint32()); + break; + case 2: + message.graph_def = $root.tensorflow.GraphDef.decode(reader, reader.uint32()); + break; + case 3: + message.saver_def = $root.tensorflow.SaverDef.decode(reader, reader.uint32()); + break; + case 4: + reader.skip().pos++; + if (message.collection_def === $util.emptyObject) + message.collection_def = {}; + key = reader.string(); + reader.pos++; + message.collection_def[key] = $root.tensorflow.CollectionDef.decode(reader, reader.uint32()); + break; + case 5: + reader.skip().pos++; + if (message.signature_def === $util.emptyObject) + message.signature_def = {}; + key = reader.string(); + reader.pos++; + message.signature_def[key] = $root.tensorflow.SignatureDef.decode(reader, reader.uint32()); + break; + case 6: + if (!(message.asset_file_def && message.asset_file_def.length)) + message.asset_file_def = []; + message.asset_file_def.push($root.tensorflow.AssetFileDef.decode(reader, reader.uint32())); + break; + case 7: + message.object_graph_def = $root.tensorflow.SavedObjectGraph.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + MetaGraphDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.MetaGraphDef(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "meta_info_def": + message.meta_info_def = $root.tensorflow.MetaGraphDef.MetaInfoDef.decodeText(reader, true); + break; + case "graph_def": + message.graph_def = $root.tensorflow.GraphDef.decodeText(reader, true); + break; + case "saver_def": + message.saver_def = $root.tensorflow.SaverDef.decodeText(reader, true); + break; + case "collection_def": + if 
(message.collection_def === $util.emptyObject) + message.collection_def = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.CollectionDef.decodeText(reader, true); + break; + } + message.collection_def[key] = value; + break; + case "signature_def": + if (message.signature_def === $util.emptyObject) + message.signature_def = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.SignatureDef.decodeText(reader, true); + break; + } + message.signature_def[key] = value; + break; + case "asset_file_def": + if (!(message.asset_file_def && message.asset_file_def.length)) + message.asset_file_def = []; + message.asset_file_def.push($root.tensorflow.AssetFileDef.decodeText(reader, true)); + break; + case "object_graph_def": + message.object_graph_def = $root.tensorflow.SavedObjectGraph.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + MetaGraphDef.MetaInfoDef = (function() { + + function MetaInfoDef(properties) { + this.tags = []; + this.function_aliases = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + MetaInfoDef.prototype.meta_graph_version = ""; + MetaInfoDef.prototype.stripped_op_list = null; + MetaInfoDef.prototype.any_info = null; + MetaInfoDef.prototype.tags = $util.emptyArray; + MetaInfoDef.prototype.tensorflow_version = ""; + MetaInfoDef.prototype.tensorflow_git_version = ""; + MetaInfoDef.prototype.stripped_default_attrs = false; + MetaInfoDef.prototype.function_aliases = $util.emptyObject; + + MetaInfoDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = 
$Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.MetaGraphDef.MetaInfoDef(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.meta_graph_version = reader.string(); + break; + case 2: + message.stripped_op_list = $root.tensorflow.OpList.decode(reader, reader.uint32()); + break; + case 3: + message.any_info = $root.google.protobuf.Any.decode(reader, reader.uint32()); + break; + case 4: + if (!(message.tags && message.tags.length)) + message.tags = []; + message.tags.push(reader.string()); + break; + case 5: + message.tensorflow_version = reader.string(); + break; + case 6: + message.tensorflow_git_version = reader.string(); + break; + case 7: + message.stripped_default_attrs = reader.bool(); + break; + case 8: + reader.skip().pos++; + if (message.function_aliases === $util.emptyObject) + message.function_aliases = {}; + key = reader.string(); + reader.pos++; + message.function_aliases[key] = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + MetaInfoDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.MetaGraphDef.MetaInfoDef(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "meta_graph_version": + message.meta_graph_version = reader.string(); + break; + case "stripped_op_list": + message.stripped_op_list = $root.tensorflow.OpList.decodeText(reader, true); + break; + case "any_info": + message.any_info = $root.google.protobuf.Any.decodeText(reader, true); + break; + case "tags": + if (!(message.tags && message.tags.length)) + message.tags = []; + if (reader.first()) + while (!reader.last()) { + message.tags.push(reader.string()); + reader.next(); + } + else + message.tags.push(reader.string()); + break; + case "tensorflow_version": + message.tensorflow_version = reader.string(); + break; + 
case "tensorflow_git_version": + message.tensorflow_git_version = reader.string(); + break; + case "stripped_default_attrs": + message.stripped_default_attrs = reader.bool(); + break; + case "function_aliases": + if (message.function_aliases === $util.emptyObject) + message.function_aliases = {}; + reader.start(); + key = ""; + value = ""; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = reader.string(); + break; + } + message.function_aliases[key] = value; + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return MetaInfoDef; + })(); + + return MetaGraphDef; + })(); + + tensorflow.CollectionDef = (function() { + + function CollectionDef(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CollectionDef.prototype.node_list = null; + CollectionDef.prototype.bytes_list = null; + CollectionDef.prototype.int64_list = null; + CollectionDef.prototype.float_list = null; + CollectionDef.prototype.any_list = null; + + var $oneOfFields; + + Object.defineProperty(CollectionDef.prototype, "kind", { + get: $util.oneOfGetter($oneOfFields = ["node_list", "bytes_list", "int64_list", "float_list", "any_list"]), + set: $util.oneOfSetter($oneOfFields) + }); + + CollectionDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.CollectionDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node_list = $root.tensorflow.CollectionDef.NodeList.decode(reader, reader.uint32()); + break; + case 2: + message.bytes_list = $root.tensorflow.CollectionDef.BytesList.decode(reader, reader.uint32()); + break; + case 3: + message.int64_list = $root.tensorflow.CollectionDef.Int64List.decode(reader, reader.uint32()); + break; + case 4: + message.float_list = $root.tensorflow.CollectionDef.FloatList.decode(reader, reader.uint32()); + break; + case 5: + message.any_list = $root.tensorflow.CollectionDef.AnyList.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + CollectionDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.CollectionDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "node_list": + message.node_list = $root.tensorflow.CollectionDef.NodeList.decodeText(reader, true); + break; + case "bytes_list": + message.bytes_list = $root.tensorflow.CollectionDef.BytesList.decodeText(reader, true); + break; + case "int64_list": + message.int64_list = $root.tensorflow.CollectionDef.Int64List.decodeText(reader, true); + break; + case "float_list": + message.float_list = $root.tensorflow.CollectionDef.FloatList.decodeText(reader, true); + break; + case "any_list": + message.any_list = $root.tensorflow.CollectionDef.AnyList.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + CollectionDef.NodeList = (function() { + + function NodeList(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NodeList.prototype.value = 
$util.emptyArray; + + NodeList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.CollectionDef.NodeList(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + message.value.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NodeList.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.CollectionDef.NodeList(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "value": + if (!(message.value && message.value.length)) + message.value = []; + if (reader.first()) + while (!reader.last()) { + message.value.push(reader.string()); + reader.next(); + } + else + message.value.push(reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NodeList; + })(); + + CollectionDef.BytesList = (function() { + + function BytesList(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BytesList.prototype.value = $util.emptyArray; + + BytesList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.CollectionDef.BytesList(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + message.value.push(reader.bytes()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BytesList.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.CollectionDef.BytesList(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "value": + if (!(message.value && message.value.length)) + message.value = []; + if (reader.first()) + while (!reader.last()) { + message.value.push(reader.bytes()); + reader.next(); + } + else + message.value.push(reader.bytes()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BytesList; + })(); + + CollectionDef.Int64List = (function() { + + function Int64List(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Int64List.prototype.value = $util.emptyArray; + + Int64List.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.CollectionDef.Int64List(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.value.push(reader.int64()); + } else + message.value.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Int64List.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.CollectionDef.Int64List(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "value": + if (!(message.value && message.value.length)) + message.value = []; + if (reader.first()) + while (!reader.last()) { + message.value.push(reader.int64()); + reader.next(); + } + else + message.value.push(reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Int64List; + })(); + + CollectionDef.FloatList = (function() { + + function FloatList(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FloatList.prototype.value = $util.emptyArray; + + FloatList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.CollectionDef.FloatList(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.value.push(reader.float()); + } else + message.value.push(reader.float()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FloatList.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.CollectionDef.FloatList(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "value": + if (!(message.value && message.value.length)) + message.value = []; + if (reader.first()) + while (!reader.last()) { + message.value.push(reader.float()); + reader.next(); + } + else + message.value.push(reader.float()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return FloatList; + })(); + + CollectionDef.AnyList = (function() { + + function AnyList(properties) { + this.value = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AnyList.prototype.value = $util.emptyArray; + + AnyList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.CollectionDef.AnyList(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.value && message.value.length)) + message.value = []; + message.value.push($root.google.protobuf.Any.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + AnyList.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.CollectionDef.AnyList(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "value": + if (!(message.value && message.value.length)) + message.value = []; + message.value.push($root.google.protobuf.Any.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return AnyList; + })(); + + return CollectionDef; + })(); + + tensorflow.TensorInfo = (function() { + + function TensorInfo(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorInfo.prototype.name = ""; + TensorInfo.prototype.coo_sparse = null; + TensorInfo.prototype.composite_tensor = null; + TensorInfo.prototype.dtype = 0; + TensorInfo.prototype.tensor_shape = null; + + var $oneOfFields; + + Object.defineProperty(TensorInfo.prototype, "encoding", { + get: $util.oneOfGetter($oneOfFields = ["name", "coo_sparse", "composite_tensor"]), + set: $util.oneOfSetter($oneOfFields) + }); + + TensorInfo.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TensorInfo(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 4: + message.coo_sparse = $root.tensorflow.TensorInfo.CooSparse.decode(reader, reader.uint32()); + break; + case 5: + message.composite_tensor = $root.tensorflow.TensorInfo.CompositeTensor.decode(reader, reader.uint32()); + break; + case 2: + message.dtype = reader.int32(); + break; + case 3: + message.tensor_shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorInfo.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorInfo(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "coo_sparse": + message.coo_sparse = $root.tensorflow.TensorInfo.CooSparse.decodeText(reader, true); + break; + case "composite_tensor": + message.composite_tensor = $root.tensorflow.TensorInfo.CompositeTensor.decodeText(reader, true); + break; + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "tensor_shape": + message.tensor_shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TensorInfo.CooSparse = (function() { + + function CooSparse(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CooSparse.prototype.values_tensor_name = ""; + CooSparse.prototype.indices_tensor_name = ""; + CooSparse.prototype.dense_shape_tensor_name = ""; + + CooSparse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = 
$Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.TensorInfo.CooSparse(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.values_tensor_name = reader.string(); + break; + case 2: + message.indices_tensor_name = reader.string(); + break; + case 3: + message.dense_shape_tensor_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + CooSparse.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorInfo.CooSparse(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "values_tensor_name": + message.values_tensor_name = reader.string(); + break; + case "indices_tensor_name": + message.indices_tensor_name = reader.string(); + break; + case "dense_shape_tensor_name": + message.dense_shape_tensor_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return CooSparse; + })(); + + TensorInfo.CompositeTensor = (function() { + + function CompositeTensor(properties) { + this.components = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + CompositeTensor.prototype.type_spec = null; + CompositeTensor.prototype.components = $util.emptyArray; + + CompositeTensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TensorInfo.CompositeTensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_spec = $root.tensorflow.TypeSpecProto.decode(reader, reader.uint32()); + break; + case 2: + if (!(message.components && message.components.length)) + message.components = []; + message.components.push($root.tensorflow.TensorInfo.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + CompositeTensor.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorInfo.CompositeTensor(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "type_spec": + message.type_spec = $root.tensorflow.TypeSpecProto.decodeText(reader, true); + break; + case "components": + if (!(message.components && message.components.length)) + message.components = []; + message.components.push($root.tensorflow.TensorInfo.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return CompositeTensor; + })(); + + return TensorInfo; + })(); + + tensorflow.SignatureDef = (function() { + + function SignatureDef(properties) { + this.inputs = {}; + this.outputs = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SignatureDef.prototype.inputs = $util.emptyObject; + SignatureDef.prototype.outputs = $util.emptyObject; + SignatureDef.prototype.method_name = ""; + + SignatureDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SignatureDef(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.skip().pos++; + if (message.inputs === $util.emptyObject) + message.inputs = {}; + key = reader.string(); + reader.pos++; + message.inputs[key] = $root.tensorflow.TensorInfo.decode(reader, reader.uint32()); + break; + case 2: + reader.skip().pos++; + if (message.outputs === $util.emptyObject) + message.outputs = {}; + key = reader.string(); + reader.pos++; + message.outputs[key] = $root.tensorflow.TensorInfo.decode(reader, reader.uint32()); + break; + case 3: + message.method_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SignatureDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SignatureDef(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "inputs": + if (message.inputs === $util.emptyObject) + message.inputs = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.TensorInfo.decodeText(reader, true); + break; + } + message.inputs[key] = value; + break; + case "outputs": + if (message.outputs === $util.emptyObject) + message.outputs = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.TensorInfo.decodeText(reader, true); + break; + } + message.outputs[key] = value; + break; + case "method_name": + message.method_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SignatureDef; + })(); + + tensorflow.AssetFileDef = (function() { + + function AssetFileDef(properties) { + if 
(properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AssetFileDef.prototype.tensor_info = null; + AssetFileDef.prototype.filename = ""; + + AssetFileDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.AssetFileDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.tensor_info = $root.tensorflow.TensorInfo.decode(reader, reader.uint32()); + break; + case 2: + message.filename = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + AssetFileDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.AssetFileDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "tensor_info": + message.tensor_info = $root.tensorflow.TensorInfo.decodeText(reader, true); + break; + case "filename": + message.filename = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return AssetFileDef; + })(); + + tensorflow.SaverDef = (function() { + + function SaverDef(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SaverDef.prototype.filename_tensor_name = ""; + SaverDef.prototype.save_tensor_name = ""; + SaverDef.prototype.restore_op_name = ""; + SaverDef.prototype.max_to_keep = 0; + SaverDef.prototype.sharded = false; + SaverDef.prototype.keep_checkpoint_every_n_hours = 0; + SaverDef.prototype.version = 0; + + SaverDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length 
=== undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.SaverDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.filename_tensor_name = reader.string(); + break; + case 2: + message.save_tensor_name = reader.string(); + break; + case 3: + message.restore_op_name = reader.string(); + break; + case 4: + message.max_to_keep = reader.int32(); + break; + case 5: + message.sharded = reader.bool(); + break; + case 6: + message.keep_checkpoint_every_n_hours = reader.float(); + break; + case 7: + message.version = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SaverDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SaverDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "filename_tensor_name": + message.filename_tensor_name = reader.string(); + break; + case "save_tensor_name": + message.save_tensor_name = reader.string(); + break; + case "restore_op_name": + message.restore_op_name = reader.string(); + break; + case "max_to_keep": + message.max_to_keep = reader.int32(); + break; + case "sharded": + message.sharded = reader.bool(); + break; + case "keep_checkpoint_every_n_hours": + message.keep_checkpoint_every_n_hours = reader.float(); + break; + case "version": + message.version = reader.enum($root.tensorflow.SaverDef.CheckpointFormatVersion); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + SaverDef.CheckpointFormatVersion = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "LEGACY"] = 0; + values[valuesById[1] = "V1"] = 1; + values[valuesById[2] = "V2"] = 2; + return values; + })(); + + return SaverDef; + })(); + + tensorflow.GraphDef = (function() { + + function GraphDef(properties) { + this.node = []; + if (properties) + for (var keys = 
Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GraphDef.prototype.node = $util.emptyArray; + GraphDef.prototype.versions = null; + GraphDef.prototype.version = 0; + GraphDef.prototype.library = null; + + GraphDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.GraphDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.tensorflow.NodeDef.decode(reader, reader.uint32())); + break; + case 4: + message.versions = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + case 3: + message.version = reader.int32(); + break; + case 2: + message.library = $root.tensorflow.FunctionDefLibrary.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + GraphDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.GraphDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "node": + if (!(message.node && message.node.length)) + message.node = []; + message.node.push($root.tensorflow.NodeDef.decodeText(reader, true)); + break; + case "versions": + message.versions = $root.tensorflow.VersionDef.decodeText(reader, true); + break; + case "version": + message.version = reader.int32(); + break; + case "library": + message.library = $root.tensorflow.FunctionDefLibrary.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return GraphDef; + })(); + + tensorflow.OpDef = (function() { + + function OpDef(properties) { + this.input_arg = []; + this.output_arg = []; + this.control_output = 
[]; + this.attr = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OpDef.prototype.name = ""; + OpDef.prototype.input_arg = $util.emptyArray; + OpDef.prototype.output_arg = $util.emptyArray; + OpDef.prototype.control_output = $util.emptyArray; + OpDef.prototype.attr = $util.emptyArray; + OpDef.prototype.deprecation = null; + OpDef.prototype.summary = ""; + OpDef.prototype.description = ""; + OpDef.prototype.is_commutative = false; + OpDef.prototype.is_aggregate = false; + OpDef.prototype.is_stateful = false; + OpDef.prototype.allows_uninitialized_input = false; + + OpDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.OpDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.input_arg && message.input_arg.length)) + message.input_arg = []; + message.input_arg.push($root.tensorflow.OpDef.ArgDef.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.output_arg && message.output_arg.length)) + message.output_arg = []; + message.output_arg.push($root.tensorflow.OpDef.ArgDef.decode(reader, reader.uint32())); + break; + case 20: + if (!(message.control_output && message.control_output.length)) + message.control_output = []; + message.control_output.push(reader.string()); + break; + case 4: + if (!(message.attr && message.attr.length)) + message.attr = []; + message.attr.push($root.tensorflow.OpDef.AttrDef.decode(reader, reader.uint32())); + break; + case 8: + message.deprecation = $root.tensorflow.OpDeprecation.decode(reader, reader.uint32()); + break; + case 5: + message.summary = reader.string(); + break; + case 6: + message.description = reader.string(); + 
break; + case 18: + message.is_commutative = reader.bool(); + break; + case 16: + message.is_aggregate = reader.bool(); + break; + case 17: + message.is_stateful = reader.bool(); + break; + case 19: + message.allows_uninitialized_input = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OpDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.OpDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "input_arg": + if (!(message.input_arg && message.input_arg.length)) + message.input_arg = []; + message.input_arg.push($root.tensorflow.OpDef.ArgDef.decodeText(reader, true)); + break; + case "output_arg": + if (!(message.output_arg && message.output_arg.length)) + message.output_arg = []; + message.output_arg.push($root.tensorflow.OpDef.ArgDef.decodeText(reader, true)); + break; + case "control_output": + if (!(message.control_output && message.control_output.length)) + message.control_output = []; + if (reader.first()) + while (!reader.last()) { + message.control_output.push(reader.string()); + reader.next(); + } + else + message.control_output.push(reader.string()); + break; + case "attr": + if (!(message.attr && message.attr.length)) + message.attr = []; + message.attr.push($root.tensorflow.OpDef.AttrDef.decodeText(reader, true)); + break; + case "deprecation": + message.deprecation = $root.tensorflow.OpDeprecation.decodeText(reader, true); + break; + case "summary": + message.summary = reader.string(); + break; + case "description": + message.description = reader.string(); + break; + case "is_commutative": + message.is_commutative = reader.bool(); + break; + case "is_aggregate": + message.is_aggregate = reader.bool(); + break; + case "is_stateful": + message.is_stateful = reader.bool(); + break; + case "allows_uninitialized_input": + message.allows_uninitialized_input = 
reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + OpDef.ArgDef = (function() { + + function ArgDef(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArgDef.prototype.name = ""; + ArgDef.prototype.description = ""; + ArgDef.prototype.type = 0; + ArgDef.prototype.type_attr = ""; + ArgDef.prototype.number_attr = ""; + ArgDef.prototype.type_list_attr = ""; + ArgDef.prototype.is_ref = false; + + ArgDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.OpDef.ArgDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.description = reader.string(); + break; + case 3: + message.type = reader.int32(); + break; + case 4: + message.type_attr = reader.string(); + break; + case 5: + message.number_attr = reader.string(); + break; + case 6: + message.type_list_attr = reader.string(); + break; + case 16: + message.is_ref = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ArgDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.OpDef.ArgDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "description": + message.description = reader.string(); + break; + case "type": + message.type = reader.enum($root.tensorflow.DataType); + break; + case "type_attr": + message.type_attr = reader.string(); + break; + case "number_attr": + message.number_attr = reader.string(); + break; + case "type_list_attr": + message.type_list_attr = reader.string(); 
+ break; + case "is_ref": + message.is_ref = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ArgDef; + })(); + + OpDef.AttrDef = (function() { + + function AttrDef(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AttrDef.prototype.name = ""; + AttrDef.prototype.type = ""; + AttrDef.prototype.default_value = null; + AttrDef.prototype.description = ""; + AttrDef.prototype.has_minimum = false; + AttrDef.prototype.minimum = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + AttrDef.prototype.allowed_values = null; + + AttrDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.OpDef.AttrDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.type = reader.string(); + break; + case 3: + message.default_value = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + case 4: + message.description = reader.string(); + break; + case 5: + message.has_minimum = reader.bool(); + break; + case 6: + message.minimum = reader.int64(); + break; + case 7: + message.allowed_values = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + AttrDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.OpDef.AttrDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "type": + message.type = reader.string(); + break; + case "default_value": + message.default_value = 
$root.tensorflow.AttrValue.decodeText(reader, true); + break; + case "description": + message.description = reader.string(); + break; + case "has_minimum": + message.has_minimum = reader.bool(); + break; + case "minimum": + message.minimum = reader.int64(); + break; + case "allowed_values": + message.allowed_values = $root.tensorflow.AttrValue.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return AttrDef; + })(); + + return OpDef; + })(); + + tensorflow.OpDeprecation = (function() { + + function OpDeprecation(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OpDeprecation.prototype.version = 0; + OpDeprecation.prototype.explanation = ""; + + OpDeprecation.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.OpDeprecation(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.version = reader.int32(); + break; + case 2: + message.explanation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OpDeprecation.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.OpDeprecation(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "version": + message.version = reader.int32(); + break; + case "explanation": + message.explanation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return OpDeprecation; + })(); + + tensorflow.OpList = (function() { + + function OpList(properties) { + this.op = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + OpList.prototype.op = $util.emptyArray; + + OpList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.OpList(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.op && message.op.length)) + message.op = []; + message.op.push($root.tensorflow.OpDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + OpList.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.OpList(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "op": + if (!(message.op && message.op.length)) + message.op = []; + message.op.push($root.tensorflow.OpDef.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return OpList; + })(); + + tensorflow.TensorShapeProto = (function() { + + function TensorShapeProto(properties) { + this.dim = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorShapeProto.prototype.dim = $util.emptyArray; + TensorShapeProto.prototype.unknown_rank = false; + + TensorShapeProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TensorShapeProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + if (!(message.dim && message.dim.length)) + message.dim = []; + message.dim.push($root.tensorflow.TensorShapeProto.Dim.decode(reader, reader.uint32())); + break; + case 3: + message.unknown_rank = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorShapeProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorShapeProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dim": + if (!(message.dim && message.dim.length)) + message.dim = []; + message.dim.push($root.tensorflow.TensorShapeProto.Dim.decodeText(reader, true)); + break; + case "unknown_rank": + message.unknown_rank = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TensorShapeProto.Dim = (function() { + + function Dim(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Dim.prototype.size = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Dim.prototype.name = ""; + + Dim.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TensorShapeProto.Dim(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.size = reader.int64(); + break; + case 2: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Dim.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorShapeProto.Dim(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "size": + message.size = reader.int64(); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Dim; + })(); + + return TensorShapeProto; + })(); + + tensorflow.DataType = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DT_INVALID"] = 0; + values[valuesById[1] = "DT_FLOAT"] = 1; + values[valuesById[2] = "DT_DOUBLE"] = 2; + values[valuesById[3] = "DT_INT32"] = 3; + values[valuesById[4] = "DT_UINT8"] = 4; + values[valuesById[5] = "DT_INT16"] = 5; + values[valuesById[6] = "DT_INT8"] = 6; + values[valuesById[7] = "DT_STRING"] = 7; + values[valuesById[8] = "DT_COMPLEX64"] = 8; + values[valuesById[9] = "DT_INT64"] = 9; + values[valuesById[10] = "DT_BOOL"] = 10; + values[valuesById[11] = "DT_QINT8"] = 11; + values[valuesById[12] = "DT_QUINT8"] = 12; + values[valuesById[13] = "DT_QINT32"] = 13; + values[valuesById[14] = "DT_BFLOAT16"] = 14; + values[valuesById[15] = "DT_QINT16"] = 15; + values[valuesById[16] = "DT_QUINT16"] = 16; + values[valuesById[17] = "DT_UINT16"] = 17; + values[valuesById[18] = "DT_COMPLEX128"] = 18; + values[valuesById[19] = "DT_HALF"] = 19; + values[valuesById[20] = "DT_RESOURCE"] = 20; + values[valuesById[21] = "DT_VARIANT"] = 21; + values[valuesById[22] = "DT_UINT32"] = 22; + values[valuesById[23] = "DT_UINT64"] = 23; + values[valuesById[101] = 
"DT_FLOAT_REF"] = 101; + values[valuesById[102] = "DT_DOUBLE_REF"] = 102; + values[valuesById[103] = "DT_INT32_REF"] = 103; + values[valuesById[104] = "DT_UINT8_REF"] = 104; + values[valuesById[105] = "DT_INT16_REF"] = 105; + values[valuesById[106] = "DT_INT8_REF"] = 106; + values[valuesById[107] = "DT_STRING_REF"] = 107; + values[valuesById[108] = "DT_COMPLEX64_REF"] = 108; + values[valuesById[109] = "DT_INT64_REF"] = 109; + values[valuesById[110] = "DT_BOOL_REF"] = 110; + values[valuesById[111] = "DT_QINT8_REF"] = 111; + values[valuesById[112] = "DT_QUINT8_REF"] = 112; + values[valuesById[113] = "DT_QINT32_REF"] = 113; + values[valuesById[114] = "DT_BFLOAT16_REF"] = 114; + values[valuesById[115] = "DT_QINT16_REF"] = 115; + values[valuesById[116] = "DT_QUINT16_REF"] = 116; + values[valuesById[117] = "DT_UINT16_REF"] = 117; + values[valuesById[118] = "DT_COMPLEX128_REF"] = 118; + values[valuesById[119] = "DT_HALF_REF"] = 119; + values[valuesById[120] = "DT_RESOURCE_REF"] = 120; + values[valuesById[121] = "DT_VARIANT_REF"] = 121; + values[valuesById[122] = "DT_UINT32_REF"] = 122; + values[valuesById[123] = "DT_UINT64_REF"] = 123; + return values; + })(); + + tensorflow.NodeDef = (function() { + + function NodeDef(properties) { + this.input = []; + this.attr = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NodeDef.prototype.name = ""; + NodeDef.prototype.op = ""; + NodeDef.prototype.input = $util.emptyArray; + NodeDef.prototype.device = ""; + NodeDef.prototype.attr = $util.emptyObject; + NodeDef.prototype.experimental_debug_info = null; + + NodeDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.NodeDef(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.op = reader.string(); + break; + case 3: + if (!(message.input && message.input.length)) + message.input = []; + message.input.push(reader.string()); + break; + case 4: + message.device = reader.string(); + break; + case 5: + reader.skip().pos++; + if (message.attr === $util.emptyObject) + message.attr = {}; + key = reader.string(); + reader.pos++; + message.attr[key] = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + case 6: + message.experimental_debug_info = $root.tensorflow.NodeDef.ExperimentalDebugInfo.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NodeDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.NodeDef(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "op": + message.op = reader.string(); + break; + case "input": + if (!(message.input && message.input.length)) + message.input = []; + if (reader.first()) + while (!reader.last()) { + message.input.push(reader.string()); + reader.next(); + } + else + message.input.push(reader.string()); + break; + case "device": + message.device = reader.string(); + break; + case "attr": + if (message.attr === $util.emptyObject) + message.attr = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.AttrValue.decodeText(reader, true); + break; + } + message.attr[key] = value; + break; + case "experimental_debug_info": + message.experimental_debug_info = 
$root.tensorflow.NodeDef.ExperimentalDebugInfo.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + NodeDef.ExperimentalDebugInfo = (function() { + + function ExperimentalDebugInfo(properties) { + this.original_node_names = []; + this.original_func_names = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ExperimentalDebugInfo.prototype.original_node_names = $util.emptyArray; + ExperimentalDebugInfo.prototype.original_func_names = $util.emptyArray; + + ExperimentalDebugInfo.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.NodeDef.ExperimentalDebugInfo(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.original_node_names && message.original_node_names.length)) + message.original_node_names = []; + message.original_node_names.push(reader.string()); + break; + case 2: + if (!(message.original_func_names && message.original_func_names.length)) + message.original_func_names = []; + message.original_func_names.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ExperimentalDebugInfo.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.NodeDef.ExperimentalDebugInfo(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "original_node_names": + if (!(message.original_node_names && message.original_node_names.length)) + message.original_node_names = []; + if (reader.first()) + while (!reader.last()) { + message.original_node_names.push(reader.string()); + reader.next(); + } + else + message.original_node_names.push(reader.string()); + 
break; + case "original_func_names": + if (!(message.original_func_names && message.original_func_names.length)) + message.original_func_names = []; + if (reader.first()) + while (!reader.last()) { + message.original_func_names.push(reader.string()); + reader.next(); + } + else + message.original_func_names.push(reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ExperimentalDebugInfo; + })(); + + return NodeDef; + })(); + + tensorflow.VersionDef = (function() { + + function VersionDef(properties) { + this.bad_consumers = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + VersionDef.prototype.producer = 0; + VersionDef.prototype.min_consumer = 0; + VersionDef.prototype.bad_consumers = $util.emptyArray; + + VersionDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.VersionDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.producer = reader.int32(); + break; + case 2: + message.min_consumer = reader.int32(); + break; + case 3: + if (!(message.bad_consumers && message.bad_consumers.length)) + message.bad_consumers = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.bad_consumers.push(reader.int32()); + } else + message.bad_consumers.push(reader.int32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + VersionDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.VersionDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "producer": + message.producer = reader.int32(); + break; + case "min_consumer": + message.min_consumer = reader.int32(); + break; + case "bad_consumers": + if (!(message.bad_consumers && message.bad_consumers.length)) + message.bad_consumers = []; + if (reader.first()) + while (!reader.last()) { + message.bad_consumers.push(reader.int32()); + reader.next(); + } + else + message.bad_consumers.push(reader.int32()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return VersionDef; + })(); + + tensorflow.FunctionDefLibrary = (function() { + + function FunctionDefLibrary(properties) { + this["function"] = []; + this.gradient = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FunctionDefLibrary.prototype["function"] = $util.emptyArray; + FunctionDefLibrary.prototype.gradient = $util.emptyArray; + + FunctionDefLibrary.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length 
=== undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.FunctionDefLibrary(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message["function"] && message["function"].length)) + message["function"] = []; + message["function"].push($root.tensorflow.FunctionDef.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.gradient && message.gradient.length)) + message.gradient = []; + message.gradient.push($root.tensorflow.GradientDef.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FunctionDefLibrary.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.FunctionDefLibrary(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "function": + if (!(message["function"] && message["function"].length)) + message["function"] = []; + message["function"].push($root.tensorflow.FunctionDef.decodeText(reader, true)); + break; + case "gradient": + if (!(message.gradient && message.gradient.length)) + message.gradient = []; + message.gradient.push($root.tensorflow.GradientDef.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return FunctionDefLibrary; + })(); + + tensorflow.FunctionDef = (function() { + + function FunctionDef(properties) { + this.attr = {}; + this.arg_attr = {}; + this.resource_arg_unique_id = {}; + this.node_def = []; + this.ret = {}; + this.control_ret = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FunctionDef.prototype.signature = null; + FunctionDef.prototype.attr = $util.emptyObject; + FunctionDef.prototype.arg_attr = $util.emptyObject; + FunctionDef.prototype.resource_arg_unique_id = $util.emptyObject; + 
FunctionDef.prototype.node_def = $util.emptyArray; + FunctionDef.prototype.ret = $util.emptyObject; + FunctionDef.prototype.control_ret = $util.emptyObject; + + FunctionDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.FunctionDef(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.signature = $root.tensorflow.OpDef.decode(reader, reader.uint32()); + break; + case 5: + reader.skip().pos++; + if (message.attr === $util.emptyObject) + message.attr = {}; + key = reader.string(); + reader.pos++; + message.attr[key] = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + case 7: + reader.skip().pos++; + if (message.arg_attr === $util.emptyObject) + message.arg_attr = {}; + key = reader.uint32(); + reader.pos++; + message.arg_attr[key] = $root.tensorflow.FunctionDef.ArgAttrs.decode(reader, reader.uint32()); + break; + case 8: + reader.skip().pos++; + if (message.resource_arg_unique_id === $util.emptyObject) + message.resource_arg_unique_id = {}; + key = reader.uint32(); + reader.pos++; + message.resource_arg_unique_id[key] = reader.uint32(); + break; + case 3: + if (!(message.node_def && message.node_def.length)) + message.node_def = []; + message.node_def.push($root.tensorflow.NodeDef.decode(reader, reader.uint32())); + break; + case 4: + reader.skip().pos++; + if (message.ret === $util.emptyObject) + message.ret = {}; + key = reader.string(); + reader.pos++; + message.ret[key] = reader.string(); + break; + case 6: + reader.skip().pos++; + if (message.control_ret === $util.emptyObject) + message.control_ret = {}; + key = reader.string(); + reader.pos++; + message.control_ret[key] = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FunctionDef.decodeText = function 
decodeText(reader) { + var message = new $root.tensorflow.FunctionDef(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "signature": + message.signature = $root.tensorflow.OpDef.decodeText(reader, true); + break; + case "attr": + if (message.attr === $util.emptyObject) + message.attr = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.AttrValue.decodeText(reader, true); + break; + } + message.attr[key] = value; + break; + case "arg_attr": + if (message.arg_attr === $util.emptyObject) + message.arg_attr = {}; + reader.start(); + key = 0; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.uint32(); + break; + case "value": + value = $root.tensorflow.FunctionDef.ArgAttrs.decodeText(reader, true); + break; + } + message.arg_attr[key] = value; + break; + case "resource_arg_unique_id": + if (message.resource_arg_unique_id === $util.emptyObject) + message.resource_arg_unique_id = {}; + reader.start(); + key = 0; + value = 0; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.uint32(); + break; + case "value": + value = reader.uint32(); + break; + } + message.resource_arg_unique_id[key] = value; + break; + case "node_def": + if (!(message.node_def && message.node_def.length)) + message.node_def = []; + message.node_def.push($root.tensorflow.NodeDef.decodeText(reader, true)); + break; + case "ret": + if (message.ret === $util.emptyObject) + message.ret = {}; + reader.start(); + key = ""; + value = ""; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = reader.string(); + break; + } + message.ret[key] = value; + break; + case "control_ret": + if (message.control_ret === $util.emptyObject) + message.control_ret = {}; + reader.start(); + 
key = ""; + value = ""; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = reader.string(); + break; + } + message.control_ret[key] = value; + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + FunctionDef.ArgAttrs = (function() { + + function ArgAttrs(properties) { + this.attr = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ArgAttrs.prototype.attr = $util.emptyObject; + + ArgAttrs.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.FunctionDef.ArgAttrs(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.skip().pos++; + if (message.attr === $util.emptyObject) + message.attr = {}; + key = reader.string(); + reader.pos++; + message.attr[key] = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ArgAttrs.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.FunctionDef.ArgAttrs(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "attr": + if (message.attr === $util.emptyObject) + message.attr = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.AttrValue.decodeText(reader, true); + break; + } + message.attr[key] = value; + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ArgAttrs; + })(); + + return FunctionDef; + })(); + + tensorflow.GradientDef = 
(function() { + + function GradientDef(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + GradientDef.prototype.function_name = ""; + GradientDef.prototype.gradient_func = ""; + + GradientDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.GradientDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.function_name = reader.string(); + break; + case 2: + message.gradient_func = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + GradientDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.GradientDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "function_name": + message.function_name = reader.string(); + break; + case "gradient_func": + message.gradient_func = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return GradientDef; + })(); + + tensorflow.AttrValue = (function() { + + function AttrValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + AttrValue.prototype.s = $util.newBuffer([]); + AttrValue.prototype.i = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + AttrValue.prototype.f = 0; + AttrValue.prototype.b = false; + AttrValue.prototype.type = 0; + AttrValue.prototype.shape = null; + AttrValue.prototype.tensor = null; + AttrValue.prototype.list = null; + AttrValue.prototype.func = null; + AttrValue.prototype.placeholder = ""; + + var $oneOfFields; + + Object.defineProperty(AttrValue.prototype, "value", { + get: $util.oneOfGetter($oneOfFields = ["s", "i", "f", "b", "type", "shape", "tensor", "list", "func", "placeholder"]), + set: $util.oneOfSetter($oneOfFields) + }); + + AttrValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.AttrValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + message.s = reader.bytes(); + break; + case 3: + message.i = reader.int64(); + break; + case 4: + message.f = reader.float(); + break; + case 5: + message.b = reader.bool(); + break; + case 6: + message.type = reader.int32(); + break; + case 7: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 8: + message.tensor = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + case 1: + message.list = $root.tensorflow.AttrValue.ListValue.decode(reader, reader.uint32()); + break; + case 10: + message.func = $root.tensorflow.NameAttrList.decode(reader, reader.uint32()); + break; + case 9: + message.placeholder = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + AttrValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.AttrValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "s": + message.s = reader.bytes(); + break; + case "i": + message.i = reader.int64(); + break; + case "f": + message.f = 
reader.float(); + break; + case "b": + message.b = reader.bool(); + break; + case "type": + message.type = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "tensor": + message.tensor = $root.tensorflow.TensorProto.decodeText(reader, true); + break; + case "list": + message.list = $root.tensorflow.AttrValue.ListValue.decodeText(reader, true); + break; + case "func": + message.func = $root.tensorflow.NameAttrList.decodeText(reader, true); + break; + case "placeholder": + message.placeholder = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + AttrValue.ListValue = (function() { + + function ListValue(properties) { + this.s = []; + this.i = []; + this.f = []; + this.b = []; + this.type = []; + this.shape = []; + this.tensor = []; + this.func = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ListValue.prototype.s = $util.emptyArray; + ListValue.prototype.i = $util.emptyArray; + ListValue.prototype.f = $util.emptyArray; + ListValue.prototype.b = $util.emptyArray; + ListValue.prototype.type = $util.emptyArray; + ListValue.prototype.shape = $util.emptyArray; + ListValue.prototype.tensor = $util.emptyArray; + ListValue.prototype.func = $util.emptyArray; + + ListValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.AttrValue.ListValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + if (!(message.s && message.s.length)) + message.s = []; + message.s.push(reader.bytes()); + break; + case 3: + if (!(message.i && message.i.length)) + message.i = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.i.push(reader.int64()); + } else + message.i.push(reader.int64()); + break; + case 4: + if (!(message.f && message.f.length)) + message.f = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.f.push(reader.float()); + } else + message.f.push(reader.float()); + break; + case 5: + if (!(message.b && message.b.length)) + message.b = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.b.push(reader.bool()); + } else + message.b.push(reader.bool()); + break; + case 6: + if (!(message.type && message.type.length)) + message.type = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.type.push(reader.int32()); + } else + message.type.push(reader.int32()); + break; + case 7: + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.tensorflow.TensorShapeProto.decode(reader, reader.uint32())); + break; + case 8: + if (!(message.tensor && message.tensor.length)) + message.tensor = []; + message.tensor.push($root.tensorflow.TensorProto.decode(reader, reader.uint32())); + break; + case 9: + if (!(message.func && message.func.length)) + message.func = []; + message.func.push($root.tensorflow.NameAttrList.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ListValue.decodeText = function decodeText(reader) { + var message = new 
$root.tensorflow.AttrValue.ListValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "s": + if (!(message.s && message.s.length)) + message.s = []; + if (reader.first()) + while (!reader.last()) { + message.s.push(reader.bytes()); + reader.next(); + } + else + message.s.push(reader.bytes()); + break; + case "i": + if (!(message.i && message.i.length)) + message.i = []; + if (reader.first()) + while (!reader.last()) { + message.i.push(reader.int64()); + reader.next(); + } + else + message.i.push(reader.int64()); + break; + case "f": + if (!(message.f && message.f.length)) + message.f = []; + if (reader.first()) + while (!reader.last()) { + message.f.push(reader.float()); + reader.next(); + } + else + message.f.push(reader.float()); + break; + case "b": + if (!(message.b && message.b.length)) + message.b = []; + if (reader.first()) + while (!reader.last()) { + message.b.push(reader.bool()); + reader.next(); + } + else + message.b.push(reader.bool()); + break; + case "type": + if (!(message.type && message.type.length)) + message.type = []; + if (reader.first()) + while (!reader.last()) { + message.type.push(reader.enum($root.tensorflow.DataType)); + reader.next(); + } + else + message.type.push(reader.enum($root.tensorflow.DataType)); + break; + case "shape": + if (!(message.shape && message.shape.length)) + message.shape = []; + message.shape.push($root.tensorflow.TensorShapeProto.decodeText(reader, true)); + break; + case "tensor": + if (!(message.tensor && message.tensor.length)) + message.tensor = []; + message.tensor.push($root.tensorflow.TensorProto.decodeText(reader, true)); + break; + case "func": + if (!(message.func && message.func.length)) + message.func = []; + message.func.push($root.tensorflow.NameAttrList.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ListValue; + })(); + + return AttrValue; + })(); + + 
tensorflow.NameAttrList = (function() { + + function NameAttrList(properties) { + this.attr = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NameAttrList.prototype.name = ""; + NameAttrList.prototype.attr = $util.emptyObject; + + NameAttrList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.NameAttrList(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + reader.skip().pos++; + if (message.attr === $util.emptyObject) + message.attr = {}; + key = reader.string(); + reader.pos++; + message.attr[key] = $root.tensorflow.AttrValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NameAttrList.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.NameAttrList(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "attr": + if (message.attr === $util.emptyObject) + message.attr = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.AttrValue.decodeText(reader, true); + break; + } + message.attr[key] = value; + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NameAttrList; + })(); + + tensorflow.TensorProto = (function() { + + function TensorProto(properties) { + this.half_val = []; + this.float_val = []; + this.double_val = []; + this.int_val = []; + this.string_val = []; + this.scomplex_val 
= []; + this.int64_val = []; + this.bool_val = []; + this.dcomplex_val = []; + this.resource_handle_val = []; + this.variant_val = []; + this.uint32_val = []; + this.uint64_val = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorProto.prototype.dtype = 0; + TensorProto.prototype.tensor_shape = null; + TensorProto.prototype.version_number = 0; + TensorProto.prototype.tensor_content = $util.newBuffer([]); + TensorProto.prototype.half_val = $util.emptyArray; + TensorProto.prototype.float_val = $util.emptyArray; + TensorProto.prototype.double_val = $util.emptyArray; + TensorProto.prototype.int_val = $util.emptyArray; + TensorProto.prototype.string_val = $util.emptyArray; + TensorProto.prototype.scomplex_val = $util.emptyArray; + TensorProto.prototype.int64_val = $util.emptyArray; + TensorProto.prototype.bool_val = $util.emptyArray; + TensorProto.prototype.dcomplex_val = $util.emptyArray; + TensorProto.prototype.resource_handle_val = $util.emptyArray; + TensorProto.prototype.variant_val = $util.emptyArray; + TensorProto.prototype.uint32_val = $util.emptyArray; + TensorProto.prototype.uint64_val = $util.emptyArray; + + TensorProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TensorProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.tensor_shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.version_number = reader.int32(); + break; + case 4: + message.tensor_content = reader.bytes(); + break; + case 13: + if (!(message.half_val && message.half_val.length)) + message.half_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.half_val.push(reader.int32()); + } else + message.half_val.push(reader.int32()); + break; + case 5: + if (!(message.float_val && message.float_val.length)) + message.float_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.float_val.push(reader.float()); + } else + message.float_val.push(reader.float()); + break; + case 6: + if (!(message.double_val && message.double_val.length)) + message.double_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.double_val.push(reader.double()); + } else + message.double_val.push(reader.double()); + break; + case 7: + if (!(message.int_val && message.int_val.length)) + message.int_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int_val.push(reader.int32()); + } else + message.int_val.push(reader.int32()); + break; + case 8: + if (!(message.string_val && message.string_val.length)) + message.string_val = []; + message.string_val.push(reader.bytes()); + break; + case 9: + if (!(message.scomplex_val && message.scomplex_val.length)) + message.scomplex_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.scomplex_val.push(reader.float()); + } else 
+ message.scomplex_val.push(reader.float()); + break; + case 10: + if (!(message.int64_val && message.int64_val.length)) + message.int64_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.int64_val.push(reader.int64()); + } else + message.int64_val.push(reader.int64()); + break; + case 11: + if (!(message.bool_val && message.bool_val.length)) + message.bool_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.bool_val.push(reader.bool()); + } else + message.bool_val.push(reader.bool()); + break; + case 12: + if (!(message.dcomplex_val && message.dcomplex_val.length)) + message.dcomplex_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.dcomplex_val.push(reader.double()); + } else + message.dcomplex_val.push(reader.double()); + break; + case 14: + if (!(message.resource_handle_val && message.resource_handle_val.length)) + message.resource_handle_val = []; + message.resource_handle_val.push($root.tensorflow.ResourceHandleProto.decode(reader, reader.uint32())); + break; + case 15: + if (!(message.variant_val && message.variant_val.length)) + message.variant_val = []; + message.variant_val.push($root.tensorflow.VariantTensorDataProto.decode(reader, reader.uint32())); + break; + case 16: + if (!(message.uint32_val && message.uint32_val.length)) + message.uint32_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.uint32_val.push(reader.uint32()); + } else + message.uint32_val.push(reader.uint32()); + break; + case 17: + if (!(message.uint64_val && message.uint64_val.length)) + message.uint64_val = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.uint64_val.push(reader.uint64()); + } else + message.uint64_val.push(reader.uint64()); + break; + default: + 
reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "tensor_shape": + message.tensor_shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "version_number": + message.version_number = reader.int32(); + break; + case "tensor_content": + message.tensor_content = reader.bytes(); + break; + case "half_val": + if (!(message.half_val && message.half_val.length)) + message.half_val = []; + if (reader.first()) + while (!reader.last()) { + message.half_val.push(reader.int32()); + reader.next(); + } + else + message.half_val.push(reader.int32()); + break; + case "float_val": + if (!(message.float_val && message.float_val.length)) + message.float_val = []; + if (reader.first()) + while (!reader.last()) { + message.float_val.push(reader.float()); + reader.next(); + } + else + message.float_val.push(reader.float()); + break; + case "double_val": + if (!(message.double_val && message.double_val.length)) + message.double_val = []; + if (reader.first()) + while (!reader.last()) { + message.double_val.push(reader.double()); + reader.next(); + } + else + message.double_val.push(reader.double()); + break; + case "int_val": + if (!(message.int_val && message.int_val.length)) + message.int_val = []; + if (reader.first()) + while (!reader.last()) { + message.int_val.push(reader.int32()); + reader.next(); + } + else + message.int_val.push(reader.int32()); + break; + case "string_val": + if (!(message.string_val && message.string_val.length)) + message.string_val = []; + if (reader.first()) + while (!reader.last()) { + message.string_val.push(reader.bytes()); + reader.next(); + } + else + message.string_val.push(reader.bytes()); + break; + case "scomplex_val": + 
if (!(message.scomplex_val && message.scomplex_val.length)) + message.scomplex_val = []; + if (reader.first()) + while (!reader.last()) { + message.scomplex_val.push(reader.float()); + reader.next(); + } + else + message.scomplex_val.push(reader.float()); + break; + case "int64_val": + if (!(message.int64_val && message.int64_val.length)) + message.int64_val = []; + if (reader.first()) + while (!reader.last()) { + message.int64_val.push(reader.int64()); + reader.next(); + } + else + message.int64_val.push(reader.int64()); + break; + case "bool_val": + if (!(message.bool_val && message.bool_val.length)) + message.bool_val = []; + if (reader.first()) + while (!reader.last()) { + message.bool_val.push(reader.bool()); + reader.next(); + } + else + message.bool_val.push(reader.bool()); + break; + case "dcomplex_val": + if (!(message.dcomplex_val && message.dcomplex_val.length)) + message.dcomplex_val = []; + if (reader.first()) + while (!reader.last()) { + message.dcomplex_val.push(reader.double()); + reader.next(); + } + else + message.dcomplex_val.push(reader.double()); + break; + case "resource_handle_val": + if (!(message.resource_handle_val && message.resource_handle_val.length)) + message.resource_handle_val = []; + message.resource_handle_val.push($root.tensorflow.ResourceHandleProto.decodeText(reader, true)); + break; + case "variant_val": + if (!(message.variant_val && message.variant_val.length)) + message.variant_val = []; + message.variant_val.push($root.tensorflow.VariantTensorDataProto.decodeText(reader, true)); + break; + case "uint32_val": + if (!(message.uint32_val && message.uint32_val.length)) + message.uint32_val = []; + if (reader.first()) + while (!reader.last()) { + message.uint32_val.push(reader.uint32()); + reader.next(); + } + else + message.uint32_val.push(reader.uint32()); + break; + case "uint64_val": + if (!(message.uint64_val && message.uint64_val.length)) + message.uint64_val = []; + if (reader.first()) + while (!reader.last()) { + 
message.uint64_val.push(reader.uint64()); + reader.next(); + } + else + message.uint64_val.push(reader.uint64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TensorProto; + })(); + + tensorflow.VariantTensorDataProto = (function() { + + function VariantTensorDataProto(properties) { + this.tensors = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + VariantTensorDataProto.prototype.type_name = ""; + VariantTensorDataProto.prototype.metadata = $util.newBuffer([]); + VariantTensorDataProto.prototype.tensors = $util.emptyArray; + + VariantTensorDataProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.VariantTensorDataProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_name = reader.string(); + break; + case 2: + message.metadata = reader.bytes(); + break; + case 3: + if (!(message.tensors && message.tensors.length)) + message.tensors = []; + message.tensors.push($root.tensorflow.TensorProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + VariantTensorDataProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.VariantTensorDataProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "type_name": + message.type_name = reader.string(); + break; + case "metadata": + message.metadata = reader.bytes(); + break; + case "tensors": + if (!(message.tensors && message.tensors.length)) + message.tensors = []; + message.tensors.push($root.tensorflow.TensorProto.decodeText(reader, true)); + break; + default: + reader.field(tag, 
message); + break; + } + } + return message; + }; + + return VariantTensorDataProto; + })(); + + tensorflow.VariableSynchronization = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "VARIABLE_SYNCHRONIZATION_AUTO"] = 0; + values[valuesById[1] = "VARIABLE_SYNCHRONIZATION_NONE"] = 1; + values[valuesById[2] = "VARIABLE_SYNCHRONIZATION_ON_WRITE"] = 2; + values[valuesById[3] = "VARIABLE_SYNCHRONIZATION_ON_READ"] = 3; + return values; + })(); + + tensorflow.VariableAggregation = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "VARIABLE_AGGREGATION_NONE"] = 0; + values[valuesById[1] = "VARIABLE_AGGREGATION_SUM"] = 1; + values[valuesById[2] = "VARIABLE_AGGREGATION_MEAN"] = 2; + values[valuesById[3] = "VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA"] = 3; + return values; + })(); + + tensorflow.VariableDef = (function() { + + function VariableDef(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + VariableDef.prototype.variable_name = ""; + VariableDef.prototype.initial_value_name = ""; + VariableDef.prototype.initializer_name = ""; + VariableDef.prototype.snapshot_name = ""; + VariableDef.prototype.save_slice_info_def = null; + VariableDef.prototype.is_resource = false; + VariableDef.prototype.trainable = false; + VariableDef.prototype.synchronization = 0; + VariableDef.prototype.aggregation = 0; + + VariableDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.VariableDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.variable_name = reader.string(); + break; + case 6: + message.initial_value_name = reader.string(); + break; + case 2: + message.initializer_name = reader.string(); + break; + case 3: + message.snapshot_name = reader.string(); + break; + case 4: + message.save_slice_info_def = $root.tensorflow.SaveSliceInfoDef.decode(reader, reader.uint32()); + break; + case 5: + message.is_resource = reader.bool(); + break; + case 7: + message.trainable = reader.bool(); + break; + case 8: + message.synchronization = reader.int32(); + break; + case 9: + message.aggregation = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + VariableDef.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.VariableDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "variable_name": + message.variable_name = reader.string(); + break; + case "initial_value_name": + message.initial_value_name = reader.string(); + break; + case "initializer_name": + message.initializer_name = reader.string(); + break; + case "snapshot_name": + message.snapshot_name = reader.string(); + break; + case "save_slice_info_def": + message.save_slice_info_def = $root.tensorflow.SaveSliceInfoDef.decodeText(reader, true); + break; + case "is_resource": + message.is_resource = reader.bool(); + break; + case "trainable": + message.trainable = reader.bool(); + break; + case "synchronization": + message.synchronization = reader.enum($root.tensorflow.VariableSynchronization); + break; + case "aggregation": + message.aggregation = reader.enum($root.tensorflow.VariableAggregation); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return VariableDef; + })(); + + 
tensorflow.SaveSliceInfoDef = (function() { + + function SaveSliceInfoDef(properties) { + this.full_shape = []; + this.var_offset = []; + this.var_shape = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SaveSliceInfoDef.prototype.full_name = ""; + SaveSliceInfoDef.prototype.full_shape = $util.emptyArray; + SaveSliceInfoDef.prototype.var_offset = $util.emptyArray; + SaveSliceInfoDef.prototype.var_shape = $util.emptyArray; + + SaveSliceInfoDef.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.SaveSliceInfoDef(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.full_name = reader.string(); + break; + case 2: + if (!(message.full_shape && message.full_shape.length)) + message.full_shape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.full_shape.push(reader.int64()); + } else + message.full_shape.push(reader.int64()); + break; + case 3: + if (!(message.var_offset && message.var_offset.length)) + message.var_offset = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.var_offset.push(reader.int64()); + } else + message.var_offset.push(reader.int64()); + break; + case 4: + if (!(message.var_shape && message.var_shape.length)) + message.var_shape = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.var_shape.push(reader.int64()); + } else + message.var_shape.push(reader.int64()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SaveSliceInfoDef.decodeText = function decodeText(reader) { + var message = new 
$root.tensorflow.SaveSliceInfoDef(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "full_name": + message.full_name = reader.string(); + break; + case "full_shape": + if (!(message.full_shape && message.full_shape.length)) + message.full_shape = []; + if (reader.first()) + while (!reader.last()) { + message.full_shape.push(reader.int64()); + reader.next(); + } + else + message.full_shape.push(reader.int64()); + break; + case "var_offset": + if (!(message.var_offset && message.var_offset.length)) + message.var_offset = []; + if (reader.first()) + while (!reader.last()) { + message.var_offset.push(reader.int64()); + reader.next(); + } + else + message.var_offset.push(reader.int64()); + break; + case "var_shape": + if (!(message.var_shape && message.var_shape.length)) + message.var_shape = []; + if (reader.first()) + while (!reader.last()) { + message.var_shape.push(reader.int64()); + reader.next(); + } + else + message.var_shape.push(reader.int64()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SaveSliceInfoDef; + })(); + + tensorflow.ResourceHandleProto = (function() { + + function ResourceHandleProto(properties) { + this.dtypes_and_shapes = []; + this.allowed_devices = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ResourceHandleProto.prototype.device = ""; + ResourceHandleProto.prototype.container = ""; + ResourceHandleProto.prototype.name = ""; + ResourceHandleProto.prototype.hash_code = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; + ResourceHandleProto.prototype.maybe_type_name = ""; + ResourceHandleProto.prototype.dtypes_and_shapes = $util.emptyArray; + ResourceHandleProto.prototype.allowed_devices = $util.emptyArray; + + ResourceHandleProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.ResourceHandleProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.device = reader.string(); + break; + case 2: + message.container = reader.string(); + break; + case 3: + message.name = reader.string(); + break; + case 4: + message.hash_code = reader.uint64(); + break; + case 5: + message.maybe_type_name = reader.string(); + break; + case 6: + if (!(message.dtypes_and_shapes && message.dtypes_and_shapes.length)) + message.dtypes_and_shapes = []; + message.dtypes_and_shapes.push($root.tensorflow.ResourceHandleProto.DtypeAndShape.decode(reader, reader.uint32())); + break; + case 7: + if (!(message.allowed_devices && message.allowed_devices.length)) + message.allowed_devices = []; + message.allowed_devices.push(reader.string()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ResourceHandleProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.ResourceHandleProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "device": + message.device = reader.string(); + break; + case "container": + message.container = reader.string(); + break; + case "name": + message.name = reader.string(); + break; + case "hash_code": + message.hash_code = reader.uint64(); + break; + case "maybe_type_name": + message.maybe_type_name = reader.string(); + break; + case "dtypes_and_shapes": + if (!(message.dtypes_and_shapes && message.dtypes_and_shapes.length)) + 
message.dtypes_and_shapes = []; + message.dtypes_and_shapes.push($root.tensorflow.ResourceHandleProto.DtypeAndShape.decodeText(reader, true)); + break; + case "allowed_devices": + if (!(message.allowed_devices && message.allowed_devices.length)) + message.allowed_devices = []; + if (reader.first()) + while (!reader.last()) { + message.allowed_devices.push(reader.string()); + reader.next(); + } + else + message.allowed_devices.push(reader.string()); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + ResourceHandleProto.DtypeAndShape = (function() { + + function DtypeAndShape(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DtypeAndShape.prototype.dtype = 0; + DtypeAndShape.prototype.shape = null; + + DtypeAndShape.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.ResourceHandleProto.DtypeAndShape(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DtypeAndShape.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.ResourceHandleProto.DtypeAndShape(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return DtypeAndShape; + })(); + + return ResourceHandleProto; + })(); + + tensorflow.SavedObjectGraph = (function() { + + function SavedObjectGraph(properties) { + this.nodes = []; + this.concrete_functions = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedObjectGraph.prototype.nodes = $util.emptyArray; + SavedObjectGraph.prototype.concrete_functions = $util.emptyObject; + + SavedObjectGraph.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedObjectGraph(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.nodes && message.nodes.length)) + message.nodes = []; + message.nodes.push($root.tensorflow.SavedObject.decode(reader, reader.uint32())); + break; + case 2: + reader.skip().pos++; + if (message.concrete_functions === $util.emptyObject) + message.concrete_functions = {}; + key = reader.string(); + reader.pos++; + message.concrete_functions[key] = $root.tensorflow.SavedConcreteFunction.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedObjectGraph.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedObjectGraph(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "nodes": + if (!(message.nodes && message.nodes.length)) + message.nodes = []; + message.nodes.push($root.tensorflow.SavedObject.decodeText(reader, true)); + break; + case "concrete_functions": + if (message.concrete_functions === $util.emptyObject) + message.concrete_functions = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.SavedConcreteFunction.decodeText(reader, true); + break; + } + message.concrete_functions[key] = value; + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedObjectGraph; + })(); + + tensorflow.SavedObject = (function() { + + function SavedObject(properties) { + this.children = []; + this.slot_variables = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedObject.prototype.children = $util.emptyArray; + 
SavedObject.prototype.slot_variables = $util.emptyArray; + SavedObject.prototype.user_object = null; + SavedObject.prototype.asset = null; + SavedObject.prototype["function"] = null; + SavedObject.prototype.variable = null; + SavedObject.prototype.bare_concrete_function = null; + SavedObject.prototype.constant = null; + SavedObject.prototype.resource = null; + + var $oneOfFields; + + Object.defineProperty(SavedObject.prototype, "kind", { + get: $util.oneOfGetter($oneOfFields = ["user_object", "asset", "function", "variable", "bare_concrete_function", "constant", "resource"]), + set: $util.oneOfSetter($oneOfFields) + }); + + SavedObject.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.SavedObject(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.children && message.children.length)) + message.children = []; + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.slot_variables && message.slot_variables.length)) + message.slot_variables = []; + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decode(reader, reader.uint32())); + break; + case 4: + message.user_object = $root.tensorflow.SavedUserObject.decode(reader, reader.uint32()); + break; + case 5: + message.asset = $root.tensorflow.SavedAsset.decode(reader, reader.uint32()); + break; + case 6: + message["function"] = $root.tensorflow.SavedFunction.decode(reader, reader.uint32()); + break; + case 7: + message.variable = $root.tensorflow.SavedVariable.decode(reader, reader.uint32()); + break; + case 8: + message.bare_concrete_function = $root.tensorflow.SavedBareConcreteFunction.decode(reader, reader.uint32()); + break; + case 9: + 
message.constant = $root.tensorflow.SavedConstant.decode(reader, reader.uint32()); + break; + case 10: + message.resource = $root.tensorflow.SavedResource.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedObject.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedObject(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "children": + if (!(message.children && message.children.length)) + message.children = []; + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decodeText(reader, true)); + break; + case "slot_variables": + if (!(message.slot_variables && message.slot_variables.length)) + message.slot_variables = []; + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decodeText(reader, true)); + break; + case "user_object": + message.user_object = $root.tensorflow.SavedUserObject.decodeText(reader, true); + break; + case "asset": + message.asset = $root.tensorflow.SavedAsset.decodeText(reader, true); + break; + case "function": + message["function"] = $root.tensorflow.SavedFunction.decodeText(reader, true); + break; + case "variable": + message.variable = $root.tensorflow.SavedVariable.decodeText(reader, true); + break; + case "bare_concrete_function": + message.bare_concrete_function = $root.tensorflow.SavedBareConcreteFunction.decodeText(reader, true); + break; + case "constant": + message.constant = $root.tensorflow.SavedConstant.decodeText(reader, true); + break; + case "resource": + message.resource = $root.tensorflow.SavedResource.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedObject; + })(); + + tensorflow.SavedUserObject = (function() { + + function SavedUserObject(properties) { + if (properties) + for (var keys = 
Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedUserObject.prototype.identifier = ""; + SavedUserObject.prototype.version = null; + SavedUserObject.prototype.metadata = ""; + + SavedUserObject.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.SavedUserObject(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.identifier = reader.string(); + break; + case 2: + message.version = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + case 3: + message.metadata = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedUserObject.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedUserObject(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "identifier": + message.identifier = reader.string(); + break; + case "version": + message.version = $root.tensorflow.VersionDef.decodeText(reader, true); + break; + case "metadata": + message.metadata = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedUserObject; + })(); + + tensorflow.SavedAsset = (function() { + + function SavedAsset(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedAsset.prototype.asset_file_def_index = 0; + + SavedAsset.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedAsset(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.asset_file_def_index = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedAsset.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedAsset(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "asset_file_def_index": + message.asset_file_def_index = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedAsset; + })(); + + tensorflow.SavedFunction = (function() { + + function SavedFunction(properties) { + this.concrete_functions = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedFunction.prototype.concrete_functions = $util.emptyArray; + SavedFunction.prototype.function_spec = null; + + SavedFunction.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedFunction(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.concrete_functions && message.concrete_functions.length)) + message.concrete_functions = []; + message.concrete_functions.push(reader.string()); + break; + case 2: + message.function_spec = $root.tensorflow.FunctionSpec.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedFunction.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedFunction(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "concrete_functions": + if (!(message.concrete_functions && message.concrete_functions.length)) + message.concrete_functions = []; + if (reader.first()) + while (!reader.last()) { + message.concrete_functions.push(reader.string()); + reader.next(); + } + else + message.concrete_functions.push(reader.string()); + break; + case "function_spec": + message.function_spec = $root.tensorflow.FunctionSpec.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedFunction; + })(); + + tensorflow.SavedConcreteFunction = (function() { + + function SavedConcreteFunction(properties) { + this.bound_inputs = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedConcreteFunction.prototype.bound_inputs = $util.emptyArray; + SavedConcreteFunction.prototype.canonicalized_input_signature = null; + SavedConcreteFunction.prototype.output_signature = null; + + SavedConcreteFunction.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedConcreteFunction(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + if (!(message.bound_inputs && message.bound_inputs.length)) + message.bound_inputs = []; + if ((tag & 7) === 2) { + var end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.bound_inputs.push(reader.int32()); + } else + message.bound_inputs.push(reader.int32()); + break; + case 3: + message.canonicalized_input_signature = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + case 4: + message.output_signature = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedConcreteFunction.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedConcreteFunction(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "bound_inputs": + if (!(message.bound_inputs && message.bound_inputs.length)) + message.bound_inputs = []; + if (reader.first()) + while (!reader.last()) { + message.bound_inputs.push(reader.int32()); + reader.next(); + } + else + message.bound_inputs.push(reader.int32()); + break; + case "canonicalized_input_signature": + message.canonicalized_input_signature = $root.tensorflow.StructuredValue.decodeText(reader, true); + break; + case "output_signature": + message.output_signature = $root.tensorflow.StructuredValue.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedConcreteFunction; + })(); + + tensorflow.SavedBareConcreteFunction = (function() { + + function SavedBareConcreteFunction(properties) { + this.argument_keywords = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = 
properties[keys[i]]; + } + + SavedBareConcreteFunction.prototype.concrete_function_name = ""; + SavedBareConcreteFunction.prototype.argument_keywords = $util.emptyArray; + SavedBareConcreteFunction.prototype.allowed_positional_arguments = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + SavedBareConcreteFunction.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.SavedBareConcreteFunction(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.concrete_function_name = reader.string(); + break; + case 2: + if (!(message.argument_keywords && message.argument_keywords.length)) + message.argument_keywords = []; + message.argument_keywords.push(reader.string()); + break; + case 3: + message.allowed_positional_arguments = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedBareConcreteFunction.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedBareConcreteFunction(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "concrete_function_name": + message.concrete_function_name = reader.string(); + break; + case "argument_keywords": + if (!(message.argument_keywords && message.argument_keywords.length)) + message.argument_keywords = []; + if (reader.first()) + while (!reader.last()) { + message.argument_keywords.push(reader.string()); + reader.next(); + } + else + message.argument_keywords.push(reader.string()); + break; + case "allowed_positional_arguments": + message.allowed_positional_arguments = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedBareConcreteFunction; + })(); + + tensorflow.SavedConstant = (function() { + + function 
SavedConstant(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedConstant.prototype.operation = ""; + + SavedConstant.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.SavedConstant(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.operation = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedConstant.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedConstant(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "operation": + message.operation = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedConstant; + })(); + + tensorflow.SavedVariable = (function() { + + function SavedVariable(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedVariable.prototype.dtype = 0; + SavedVariable.prototype.shape = null; + SavedVariable.prototype.trainable = false; + SavedVariable.prototype.synchronization = 0; + SavedVariable.prototype.aggregation = 0; + SavedVariable.prototype.name = ""; + + SavedVariable.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedVariable(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.trainable = reader.bool(); + break; + case 4: + message.synchronization = reader.int32(); + break; + case 5: + message.aggregation = reader.int32(); + break; + case 6: + message.name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedVariable.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedVariable(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "trainable": + message.trainable = reader.bool(); + break; + case "synchronization": + message.synchronization = reader.enum($root.tensorflow.VariableSynchronization); + break; + case "aggregation": + message.aggregation = reader.enum($root.tensorflow.VariableAggregation); + break; + case "name": + message.name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedVariable; + })(); + + tensorflow.FunctionSpec = (function() { + + function FunctionSpec(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + FunctionSpec.prototype.fullargspec = null; + FunctionSpec.prototype.is_method = false; + FunctionSpec.prototype.input_signature = null; + + FunctionSpec.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = 
$Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.FunctionSpec(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.fullargspec = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + case 2: + message.is_method = reader.bool(); + break; + case 5: + message.input_signature = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + FunctionSpec.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.FunctionSpec(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "fullargspec": + message.fullargspec = $root.tensorflow.StructuredValue.decodeText(reader, true); + break; + case "is_method": + message.is_method = reader.bool(); + break; + case "input_signature": + message.input_signature = $root.tensorflow.StructuredValue.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return FunctionSpec; + })(); + + tensorflow.SavedResource = (function() { + + function SavedResource(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedResource.prototype.device = ""; + + SavedResource.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedResource(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.device = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedResource.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedResource(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "device": + message.device = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedResource; + })(); + + tensorflow.TrackableObjectGraph = (function() { + + function TrackableObjectGraph(properties) { + this.nodes = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TrackableObjectGraph.prototype.nodes = $util.emptyArray; + + TrackableObjectGraph.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TrackableObjectGraph(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.nodes && message.nodes.length)) + message.nodes = []; + message.nodes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TrackableObjectGraph.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TrackableObjectGraph(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "nodes": + if (!(message.nodes && message.nodes.length)) + message.nodes = []; + message.nodes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TrackableObjectGraph.TrackableObject = (function() { + + function TrackableObject(properties) { + this.children = []; + this.attributes = []; + this.slot_variables = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TrackableObject.prototype.children = $util.emptyArray; + TrackableObject.prototype.attributes = $util.emptyArray; + TrackableObject.prototype.slot_variables = $util.emptyArray; + + TrackableObject.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TrackableObjectGraph.TrackableObject(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.children && message.children.length)) + message.children = []; + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decode(reader, reader.uint32())); + break; + case 2: + if (!(message.attributes && message.attributes.length)) + message.attributes = []; + message.attributes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor.decode(reader, reader.uint32())); + break; + case 3: + if (!(message.slot_variables && message.slot_variables.length)) + message.slot_variables = []; + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TrackableObject.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TrackableObjectGraph.TrackableObject(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "children": + if (!(message.children && message.children.length)) + message.children = []; + message.children.push($root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference.decodeText(reader, true)); + break; + case "attributes": + if (!(message.attributes && message.attributes.length)) + message.attributes = []; + message.attributes.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor.decodeText(reader, true)); + break; + case "slot_variables": + if (!(message.slot_variables && message.slot_variables.length)) + message.slot_variables = []; + message.slot_variables.push($root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + 
return message; + }; + + TrackableObject.ObjectReference = (function() { + + function ObjectReference(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ObjectReference.prototype.node_id = 0; + ObjectReference.prototype.local_name = ""; + + ObjectReference.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.node_id = reader.int32(); + break; + case 2: + message.local_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ObjectReference.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.ObjectReference(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "node_id": + message.node_id = reader.int32(); + break; + case "local_name": + message.local_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ObjectReference; + })(); + + TrackableObject.SerializedTensor = (function() { + + function SerializedTensor(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SerializedTensor.prototype.name = ""; + SerializedTensor.prototype.full_name = ""; + SerializedTensor.prototype.checkpoint_key = ""; + SerializedTensor.prototype.optional_restore = false; + + SerializedTensor.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = 
$Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.full_name = reader.string(); + break; + case 3: + message.checkpoint_key = reader.string(); + break; + case 4: + message.optional_restore = reader.bool(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SerializedTensor.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SerializedTensor(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "full_name": + message.full_name = reader.string(); + break; + case "checkpoint_key": + message.checkpoint_key = reader.string(); + break; + case "optional_restore": + message.optional_restore = reader.bool(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SerializedTensor; + })(); + + TrackableObject.SlotVariableReference = (function() { + + function SlotVariableReference(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SlotVariableReference.prototype.original_variable_node_id = 0; + SlotVariableReference.prototype.slot_name = ""; + SlotVariableReference.prototype.slot_variable_node_id = 0; + + SlotVariableReference.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.original_variable_node_id = reader.int32(); + break; + case 2: + message.slot_name = reader.string(); + break; + case 3: + message.slot_variable_node_id = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SlotVariableReference.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TrackableObjectGraph.TrackableObject.SlotVariableReference(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "original_variable_node_id": + message.original_variable_node_id = reader.int32(); + break; + case "slot_name": + message.slot_name = reader.string(); + break; + case "slot_variable_node_id": + message.slot_variable_node_id = reader.int32(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SlotVariableReference; + })(); + + return TrackableObject; + })(); + + return TrackableObjectGraph; + })(); + + tensorflow.StructuredValue = (function() { + + function StructuredValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + StructuredValue.prototype.none_value = null; + StructuredValue.prototype.float64_value = 0; + StructuredValue.prototype.int64_value = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + StructuredValue.prototype.string_value = ""; + StructuredValue.prototype.bool_value = false; + StructuredValue.prototype.tensor_shape_value = null; + StructuredValue.prototype.tensor_dtype_value = 0; + StructuredValue.prototype.tensor_spec_value = null; + StructuredValue.prototype.type_spec_value = null; + StructuredValue.prototype.bounded_tensor_spec_value = null; + StructuredValue.prototype.list_value = null; + StructuredValue.prototype.tuple_value = null; + StructuredValue.prototype.dict_value = null; + StructuredValue.prototype.named_tuple_value = null; + + var $oneOfFields; + + Object.defineProperty(StructuredValue.prototype, "kind", { + get: $util.oneOfGetter($oneOfFields = ["none_value", "float64_value", "int64_value", "string_value", "bool_value", "tensor_shape_value", "tensor_dtype_value", "tensor_spec_value", "type_spec_value", "bounded_tensor_spec_value", "list_value", "tuple_value", "dict_value", "named_tuple_value"]), + set: $util.oneOfSetter($oneOfFields) + }); + + StructuredValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.StructuredValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.none_value = $root.tensorflow.NoneValue.decode(reader, reader.uint32()); + break; + case 11: + message.float64_value = reader.double(); + break; + case 12: + message.int64_value = reader.sint64(); + break; + case 13: + message.string_value = reader.string(); + break; + case 14: + message.bool_value = reader.bool(); + break; + case 31: + message.tensor_shape_value = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 32: + message.tensor_dtype_value = reader.int32(); + break; + case 33: + message.tensor_spec_value = $root.tensorflow.TensorSpecProto.decode(reader, reader.uint32()); + break; + case 34: + message.type_spec_value = $root.tensorflow.TypeSpecProto.decode(reader, reader.uint32()); + break; + case 35: + message.bounded_tensor_spec_value = $root.tensorflow.BoundedTensorSpecProto.decode(reader, reader.uint32()); + break; + case 51: + message.list_value = $root.tensorflow.ListValue.decode(reader, reader.uint32()); + break; + case 52: + message.tuple_value = $root.tensorflow.TupleValue.decode(reader, reader.uint32()); + break; + case 53: + message.dict_value = $root.tensorflow.DictValue.decode(reader, reader.uint32()); + break; + case 54: + message.named_tuple_value = $root.tensorflow.NamedTupleValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + StructuredValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.StructuredValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "none_value": + message.none_value = $root.tensorflow.NoneValue.decodeText(reader, true); + break; + case "float64_value": + message.float64_value = reader.double(); + break; + case "int64_value": + message.int64_value = 
reader.sint64(); + break; + case "string_value": + message.string_value = reader.string(); + break; + case "bool_value": + message.bool_value = reader.bool(); + break; + case "tensor_shape_value": + message.tensor_shape_value = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "tensor_dtype_value": + message.tensor_dtype_value = reader.enum($root.tensorflow.DataType); + break; + case "tensor_spec_value": + message.tensor_spec_value = $root.tensorflow.TensorSpecProto.decodeText(reader, true); + break; + case "type_spec_value": + message.type_spec_value = $root.tensorflow.TypeSpecProto.decodeText(reader, true); + break; + case "bounded_tensor_spec_value": + message.bounded_tensor_spec_value = $root.tensorflow.BoundedTensorSpecProto.decodeText(reader, true); + break; + case "list_value": + message.list_value = $root.tensorflow.ListValue.decodeText(reader, true); + break; + case "tuple_value": + message.tuple_value = $root.tensorflow.TupleValue.decodeText(reader, true); + break; + case "dict_value": + message.dict_value = $root.tensorflow.DictValue.decodeText(reader, true); + break; + case "named_tuple_value": + message.named_tuple_value = $root.tensorflow.NamedTupleValue.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return StructuredValue; + })(); + + tensorflow.NoneValue = (function() { + + function NoneValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NoneValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.NoneValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NoneValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.NoneValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NoneValue; + })(); + + tensorflow.ListValue = (function() { + + function ListValue(properties) { + this.values = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + ListValue.prototype.values = $util.emptyArray; + + ListValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.ListValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.values && message.values.length)) + message.values = []; + message.values.push($root.tensorflow.StructuredValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + ListValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.ListValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "values": + if (!(message.values && message.values.length)) + message.values = []; + message.values.push($root.tensorflow.StructuredValue.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return ListValue; + })(); + + tensorflow.TupleValue = (function() { + + function TupleValue(properties) { + this.values = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TupleValue.prototype.values = $util.emptyArray; + + TupleValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TupleValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.values && message.values.length)) + message.values = []; + message.values.push($root.tensorflow.StructuredValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TupleValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TupleValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "values": + if (!(message.values && message.values.length)) + message.values = []; + message.values.push($root.tensorflow.StructuredValue.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TupleValue; + })(); + + tensorflow.DictValue = (function() { + + function DictValue(properties) { + this.fields = {}; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + DictValue.prototype.fields = $util.emptyObject; + + DictValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.DictValue(), key; + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + reader.skip().pos++; + if (message.fields === $util.emptyObject) + message.fields = {}; + key = reader.string(); + reader.pos++; + message.fields[key] = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + DictValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.DictValue(), key, value; + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "fields": + if (message.fields === $util.emptyObject) + message.fields = {}; + reader.start(); + key = ""; + value = null; + while (!reader.end()) + switch (reader.tag()) { + case "key": + key = reader.string(); + break; + case "value": + value = $root.tensorflow.StructuredValue.decodeText(reader, true); + break; + } + message.fields[key] = value; + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return DictValue; + })(); + + tensorflow.PairValue = (function() { + + function PairValue(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + PairValue.prototype.key = ""; + PairValue.prototype.value = null; + + PairValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.PairValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.key = reader.string(); + break; + case 2: + message.value = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + PairValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.PairValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "key": + message.key = reader.string(); + break; + case "value": + message.value = $root.tensorflow.StructuredValue.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return PairValue; + })(); + + tensorflow.NamedTupleValue = (function() { + + function NamedTupleValue(properties) { + this.values = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + NamedTupleValue.prototype.name = ""; + NamedTupleValue.prototype.values = $util.emptyArray; + + NamedTupleValue.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.NamedTupleValue(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + if (!(message.values && message.values.length)) + message.values = []; + message.values.push($root.tensorflow.PairValue.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + NamedTupleValue.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.NamedTupleValue(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "values": + if (!(message.values && message.values.length)) + message.values = []; + message.values.push($root.tensorflow.PairValue.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return NamedTupleValue; + })(); + + tensorflow.TensorSpecProto = (function() { + + function TensorSpecProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorSpecProto.prototype.name = ""; + TensorSpecProto.prototype.shape = null; + TensorSpecProto.prototype.dtype = 0; + + TensorSpecProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.TensorSpecProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.dtype = reader.int32(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorSpecProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorSpecProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return TensorSpecProto; + })(); + + tensorflow.BoundedTensorSpecProto = (function() { + + function BoundedTensorSpecProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BoundedTensorSpecProto.prototype.name = ""; + BoundedTensorSpecProto.prototype.shape = null; + BoundedTensorSpecProto.prototype.dtype = 0; + BoundedTensorSpecProto.prototype.minimum = null; + BoundedTensorSpecProto.prototype.maximum = null; + + BoundedTensorSpecProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.BoundedTensorSpecProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.dtype = reader.int32(); + break; + case 4: + message.minimum = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + case 5: + message.maximum = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BoundedTensorSpecProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.BoundedTensorSpecProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "minimum": + message.minimum = $root.tensorflow.TensorProto.decodeText(reader, true); + break; + case "maximum": + message.maximum = $root.tensorflow.TensorProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BoundedTensorSpecProto; + })(); + + tensorflow.TypeSpecProto = (function() { + + function TypeSpecProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TypeSpecProto.prototype.type_spec_class = 0; + TypeSpecProto.prototype.type_state = null; + TypeSpecProto.prototype.type_spec_class_name = ""; + + TypeSpecProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = 
length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.TypeSpecProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_spec_class = reader.int32(); + break; + case 2: + message.type_state = $root.tensorflow.StructuredValue.decode(reader, reader.uint32()); + break; + case 3: + message.type_spec_class_name = reader.string(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TypeSpecProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TypeSpecProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "type_spec_class": + message.type_spec_class = reader.enum($root.tensorflow.TypeSpecProto.TypeSpecClass); + break; + case "type_state": + message.type_state = $root.tensorflow.StructuredValue.decodeText(reader, true); + break; + case "type_spec_class_name": + message.type_spec_class_name = reader.string(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TypeSpecProto.TypeSpecClass = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNKNOWN"] = 0; + values[valuesById[1] = "SPARSE_TENSOR_SPEC"] = 1; + values[valuesById[2] = "INDEXED_SLICES_SPEC"] = 2; + values[valuesById[3] = "RAGGED_TENSOR_SPEC"] = 3; + values[valuesById[4] = "TENSOR_ARRAY_SPEC"] = 4; + values[valuesById[5] = "DATA_DATASET_SPEC"] = 5; + values[valuesById[6] = "DATA_ITERATOR_SPEC"] = 6; + values[valuesById[7] = "OPTIONAL_SPEC"] = 7; + values[valuesById[8] = "PER_REPLICA_SPEC"] = 8; + values[valuesById[9] = "VARIABLE_SPEC"] = 9; + values[valuesById[10] = "ROW_PARTITION_SPEC"] = 10; + return values; + })(); + + return TypeSpecProto; + })(); + + tensorflow.BundleHeaderProto = (function() { + + function BundleHeaderProto(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < 
keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + BundleHeaderProto.prototype.num_shards = 0; + BundleHeaderProto.prototype.endianness = 0; + BundleHeaderProto.prototype.version = null; + + BundleHeaderProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.BundleHeaderProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.num_shards = reader.int32(); + break; + case 2: + message.endianness = reader.int32(); + break; + case 3: + message.version = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BundleHeaderProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.BundleHeaderProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "num_shards": + message.num_shards = reader.int32(); + break; + case "endianness": + message.endianness = reader.enum($root.tensorflow.BundleHeaderProto.Endianness); + break; + case "version": + message.version = $root.tensorflow.VersionDef.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + BundleHeaderProto.Endianness = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "LITTLE"] = 0; + values[valuesById[1] = "BIG"] = 1; + return values; + })(); + + return BundleHeaderProto; + })(); + + tensorflow.BundleEntryProto = (function() { + + function BundleEntryProto(properties) { + this.slices = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + 
BundleEntryProto.prototype.dtype = 0; + BundleEntryProto.prototype.shape = null; + BundleEntryProto.prototype.shard_id = 0; + BundleEntryProto.prototype.offset = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + BundleEntryProto.prototype.size = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + BundleEntryProto.prototype.crc32c = 0; + BundleEntryProto.prototype.slices = $util.emptyArray; + + BundleEntryProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.BundleEntryProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.dtype = reader.int32(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.shard_id = reader.int32(); + break; + case 4: + message.offset = reader.int64(); + break; + case 5: + message.size = reader.int64(); + break; + case 6: + message.crc32c = reader.fixed32(); + break; + case 7: + if (!(message.slices && message.slices.length)) + message.slices = []; + message.slices.push($root.tensorflow.TensorSliceProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + BundleEntryProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.BundleEntryProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "dtype": + message.dtype = reader.enum($root.tensorflow.DataType); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "shard_id": + message.shard_id = reader.int32(); + break; + case "offset": + message.offset = reader.int64(); + break; + case "size": + message.size = reader.int64(); + break; + case "crc32c": + message.crc32c = reader.fixed32(); + 
break; + case "slices": + if (!(message.slices && message.slices.length)) + message.slices = []; + message.slices.push($root.tensorflow.TensorSliceProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return BundleEntryProto; + })(); + + tensorflow.TensorSliceProto = (function() { + + function TensorSliceProto(properties) { + this.extent = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + TensorSliceProto.prototype.extent = $util.emptyArray; + + TensorSliceProto.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.TensorSliceProto(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.extent && message.extent.length)) + message.extent = []; + message.extent.push($root.tensorflow.TensorSliceProto.Extent.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + TensorSliceProto.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorSliceProto(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "extent": + if (!(message.extent && message.extent.length)) + message.extent = []; + message.extent.push($root.tensorflow.TensorSliceProto.Extent.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + TensorSliceProto.Extent = (function() { + + function Extent(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Extent.prototype.start 
= $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Extent.prototype.length = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + var $oneOfFields; + + Object.defineProperty(Extent.prototype, "has_length", { + get: $util.oneOfGetter($oneOfFields = ["length"]), + set: $util.oneOfSetter($oneOfFields) + }); + + Extent.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? reader.len : reader.pos + length, message = new $root.tensorflow.TensorSliceProto.Extent(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.start = reader.int64(); + break; + case 2: + message.length = reader.int64(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Extent.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.TensorSliceProto.Extent(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "start": + message.start = reader.int64(); + break; + case "length": + message.length = reader.int64(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Extent; + })(); + + return TensorSliceProto; + })(); + + tensorflow.SavedSliceMeta = (function() { + + function SavedSliceMeta(properties) { + this.slice = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedSliceMeta.prototype.name = ""; + SavedSliceMeta.prototype.shape = null; + SavedSliceMeta.prototype.type = 0; + SavedSliceMeta.prototype.slice = $util.emptyArray; + + SavedSliceMeta.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedSliceMeta(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.shape = $root.tensorflow.TensorShapeProto.decode(reader, reader.uint32()); + break; + case 3: + message.type = reader.int32(); + break; + case 4: + if (!(message.slice && message.slice.length)) + message.slice = []; + message.slice.push($root.tensorflow.TensorSliceProto.decode(reader, reader.uint32())); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedSliceMeta.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedSliceMeta(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "shape": + message.shape = $root.tensorflow.TensorShapeProto.decodeText(reader, true); + break; + case "type": + message.type = reader.enum($root.tensorflow.DataType); + break; + case "slice": + if (!(message.slice && message.slice.length)) + message.slice = []; + message.slice.push($root.tensorflow.TensorSliceProto.decodeText(reader, true)); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedSliceMeta; + })(); + + tensorflow.SavedTensorSliceMeta = (function() { + + function SavedTensorSliceMeta(properties) { + this.tensor = []; + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedTensorSliceMeta.prototype.tensor = $util.emptyArray; + SavedTensorSliceMeta.prototype.versions = null; + + SavedTensorSliceMeta.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedTensorSliceMeta(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (!(message.tensor && message.tensor.length)) + message.tensor = []; + message.tensor.push($root.tensorflow.SavedSliceMeta.decode(reader, reader.uint32())); + break; + case 2: + message.versions = $root.tensorflow.VersionDef.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedTensorSliceMeta.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedTensorSliceMeta(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "tensor": + if (!(message.tensor && message.tensor.length)) + message.tensor = []; + message.tensor.push($root.tensorflow.SavedSliceMeta.decodeText(reader, true)); + break; + case "versions": + message.versions = $root.tensorflow.VersionDef.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedTensorSliceMeta; + })(); + + tensorflow.SavedSlice = (function() { + + function SavedSlice(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedSlice.prototype.name = ""; + SavedSlice.prototype.slice = null; + SavedSlice.prototype.data = null; + + SavedSlice.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedSlice(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.name = reader.string(); + break; + case 2: + message.slice = $root.tensorflow.TensorSliceProto.decode(reader, reader.uint32()); + break; + case 3: + message.data = $root.tensorflow.TensorProto.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedSlice.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedSlice(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "name": + message.name = reader.string(); + break; + case "slice": + message.slice = $root.tensorflow.TensorSliceProto.decodeText(reader, true); + break; + case "data": + message.data = $root.tensorflow.TensorProto.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedSlice; + })(); + + tensorflow.SavedTensorSlices = (function() { + + function SavedTensorSlices(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + SavedTensorSlices.prototype.meta = null; + SavedTensorSlices.prototype.data = null; + + SavedTensorSlices.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tensorflow.SavedTensorSlices(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.meta = $root.tensorflow.SavedTensorSliceMeta.decode(reader, reader.uint32()); + break; + case 2: + message.data = $root.tensorflow.SavedSlice.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + SavedTensorSlices.decodeText = function decodeText(reader) { + var message = new $root.tensorflow.SavedTensorSlices(); + reader.start(); + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "meta": + message.meta = $root.tensorflow.SavedTensorSliceMeta.decodeText(reader, true); + break; + case "data": + message.data = $root.tensorflow.SavedSlice.decodeText(reader, true); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return SavedTensorSlices; + })(); + + return tensorflow; + })(); + + $root.google = (function() { + + var google = {}; + + google.protobuf = (function() { + + var protobuf = {}; + + protobuf.Any = (function() { + + function Any(properties) { + if (properties) + for (var keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + Any.prototype.type_url = ""; + Any.prototype.value = $util.newBuffer([]); + + Any.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + var end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.google.protobuf.Any(); + while (reader.pos < end) { + var tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + message.type_url = reader.string(); + break; + case 2: + message.value = reader.bytes(); + break; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + Any.decodeText = function decodeText(reader) { + var message = new $root.google.protobuf.Any(); + reader.start(); + if (reader.any(message)) + return message; + while (!reader.end()) { + var tag = reader.tag(); + switch (tag) { + case "type_url": + message.type_url = reader.string(); + break; + case "value": + message.value = reader.bytes(); + break; + default: + reader.field(tag, message); + break; + } + } + return message; + }; + + return Any; + })(); + + return protobuf; + })(); + + return google; + })(); + + return $root; +})(protobuf); diff --git a/frontend/packages/core/public/netron/tf.js b/frontend/packages/core/public/netron/tf.js new file mode 100644 index 00000000..d72aa3d6 --- /dev/null +++ b/frontend/packages/core/public/netron/tf.js @@ -0,0 +1,1682 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +// Experimental + +var tf = tf || {}; +var long = long || { Long: require('long') }; +var protobuf = protobuf || require('protobufjs'); +var prototxt = prototxt || require('protobufjs/ext/prototxt'); + +tf.ModelFactory = class { + + match(context) { + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + if (extension === 'meta') { + const tags = context.tags('pb'); + if (tags.size !== 0) { + return true; + } + } + if (extension === 'pbtxt' || extension === 'prototxt') { + if (identifier.endsWith('predict_net.pbtxt') || identifier.endsWith('predict_net.prototxt') || + identifier.endsWith('init_net.pbtxt') || identifier.endsWith('init_net.prototxt')) { + return false; + } + const tags = context.tags('pbtxt'); + if 
(tags.has('input_stream') || tags.has('output_stream')) { + return false; + } + if (tags.has('node') || tags.has('saved_model_schema_version') || tags.has('meta_graphs') || tags.has('graph_def')) { + return true; + } + } + if (extension === 'pb' || extension === 'pbtxt' || extension === 'prototxt') { + if (identifier.endsWith('predict_net.pb') || identifier.endsWith('init_net.pb')) { + return false; + } + if (identifier == 'tfhub_module.pb') { + const buffer = context.buffer; + if (buffer && buffer.length == 2 && buffer[0] == 0x08 && buffer[1] == 0x03) { + return false; + } + } + const tags = context.tags('pb'); + if (tags.size === 0) { + const tags = context.tags('pbtxt'); + if (tags.has('input_stream') || tags.has('output_stream')) { + return false; + } + if (tags.has('node') || tags.has('saved_model_schema_version') || tags.has('meta_graphs') || tags.has('graph_def')) { + return true; + } + } + else { + // ignore input_0.pb, output_0.pb + if (tags.has(1) && tags.get(1) === 0 && + tags.has(2) && tags.get(2) === 0 && + tags.has(9) && tags.get(9) === 2) { + return false; + } + if (!Array.from(tags.values()).some((v) => v === 5)) { + return true; + } + } + } + if (extension === 'json') { + try { + const root = JSON.parse(context.text); + if (root && root.format && root.format === 'graph-model' && root.modelTopology) { + return true; + } + } + catch (err) { + // continue regardless of error + } + } + if (extension === 'index' || extension === 'ckpt') { + if (context.buffer.length > 8) { + const buffer = context.buffer.subarray(context.buffer.length - 8, context.buffer.length); + const signature = [ 0x57, 0xfb, 0x80, 0x8b, 0x24, 0x75, 0x47, 0xdb ]; + if (buffer.every((value, index) => value === signature[index])) { + return true; + } + } + } + return false; + } + + open(context, host) { + return host.require('./tf-proto').then(() => { + tf.proto = protobuf.roots.tf.tensorflow; + let saved_model = null; + let format = null; + let producer = null; + const identifier = 
context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + switch (extension) { + case 'ckpt': + case 'index': { + return tf.ModelFactory._openBundle(context, host); + } + case 'json': { + try { + const root = JSON.parse(context.text); + let graph_def = new tf.proto.GraphDef(); + let meta_graph = new tf.proto.MetaGraphDef(); + meta_graph.graph_def = graph_def; + saved_model = new tf.proto.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + for (const node of root.modelTopology.node) { + graph_def.node.push(node); + node.input = node.input || []; + } + format = 'TensorFlow.js ' + root.format; + producer = root.convertedBy || root.generatedBy || ''; + } + catch (error) { + throw new tf.Error("File text format is not TensorFlow.js graph-model (" + error.message + ") in '" + identifier + "'."); + } + break; + } + default: { + const tags = context.tags('pbtxt'); + if (tags.has('node') || tags.has('saved_model_schema_version') || tags.has('meta_graphs') || tags.has('graph_def')) { + if (tags.has('saved_model_schema_version') || tags.has('meta_graphs')) { + try { + if (identifier.endsWith('saved_model.pbtxt') || identifier.endsWith('saved_model.prototxt')) { + saved_model = tf.proto.SavedModel.decodeText(prototxt.TextReader.create(context.text)); + format = 'TensorFlow Saved Model'; + if (saved_model && Object.prototype.hasOwnProperty.call(saved_model, 'saved_model_schema_version')) { + format = format + ' v' + saved_model.saved_model_schema_version.toString(); + } + } + } + catch (error) { + throw new tf.Error("File text format is not tensorflow.SavedModel (" + error.message + ") in '" + identifier + "'."); + } + } + else if (tags.has('graph_def')) { + try { + if (!saved_model) { + const meta_graph = tf.proto.MetaGraphDef.decodeText(prototxt.TextReader.create(context.text)); + saved_model = new tf.proto.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + format = 'TensorFlow MetaGraph'; + } + } + catch (error) { + throw new 
tf.Error("File text format is not tensorflow.MetaGraphDef (" + error.message + ") in '" + identifier + "'."); + } + } + else if (tags.has('node')) { + try { + const graph_def = tf.proto.GraphDef.decodeText(prototxt.TextReader.create(context.text)); + let meta_graph = new tf.proto.MetaGraphDef(); + meta_graph.graph_def = graph_def; + saved_model = new tf.proto.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + format = 'TensorFlow Graph'; + } + catch (error) { + throw new tf.Error("File text format is not tensorflow.GraphDef (" + error.message + ") in '" + identifier + "'."); + } + } + } + else { + try { + if (identifier.endsWith('saved_model.pb')) { + saved_model = tf.proto.SavedModel.decode(context.buffer); + format = 'TensorFlow Saved Model'; + if (saved_model && Object.prototype.hasOwnProperty.call(saved_model, 'saved_model_schema_version')) { + format = format + ' v' + saved_model.saved_model_schema_version.toString(); + } + } + } + catch (error) { + let buffer = context.buffer; + if (buffer.length > 3 && buffer[0] == 0x08 && buffer[1] == 0x01 && buffer[2] == 0x12) { + throw new tf.Error("File format is not tensorflow.SavedModel (" + error.message + ") in '" + identifier + "'."); + } + } + try { + if (!saved_model && extension == 'meta') { + const meta_graph = tf.proto.MetaGraphDef.decode(context.buffer); + saved_model = new tf.proto.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + format = 'TensorFlow MetaGraph'; + } + } + catch (error) { + throw new tf.Error("File format is not tensorflow.MetaGraphDef (" + error.message + ") in '" + identifier + "'."); + } + try { + if (!saved_model) { + const graph_def = tf.proto.GraphDef.decode(context.buffer); + let meta_graph = new tf.proto.MetaGraphDef(); + meta_graph.graph_def = graph_def; + saved_model = new tf.proto.SavedModel(); + saved_model.meta_graphs.push(meta_graph); + format = 'TensorFlow Graph'; + } + } + catch (error) { + throw new tf.Error("File format is not tensorflow.GraphDef (" + 
error.message + ") in '" + identifier + "'."); + } + } + if (saved_model && saved_model.meta_graphs && saved_model.meta_graphs.length > 0 && + saved_model.meta_graphs[0].meta_info_def && + Object.prototype.hasOwnProperty.call(saved_model.meta_graphs[0].meta_info_def, 'tensorflow_version')) { + producer = 'TensorFlow v' + saved_model.meta_graphs[0].meta_info_def.tensorflow_version; + } + break; + } + } + + return tf.Metadata.open(host).then((metadata) => { + if (saved_model.meta_graphs.length === 1 && + saved_model.meta_graphs[0].object_graph_def && + saved_model.meta_graphs[0].object_graph_def.nodes && + saved_model.meta_graphs[0].object_graph_def.nodes.length > 0) { + const identifier = 'variables/variables.index'; + return context.request(identifier, null).then((buffer) => { + return tf.TensorBundle.open(buffer, identifier, context, host).then((bundle) => { + return tf.ModelFactory._openModel(identifier, host, metadata, saved_model, format, producer, bundle); + }); + }).catch(() => { + return tf.ModelFactory._openModel(identifier, host, metadata, saved_model, format, producer, null); + }); + } + return tf.ModelFactory._openModel(identifier, host, metadata, saved_model, format, producer, null); + }); + }); + } + + static _openModel(identifier, host, metadata, saved_model, format, producer, bundle) { + try { + return new tf.Model(metadata, saved_model, format, producer, bundle); + } + catch (error) { + host.exception(error, false); + const message = error && error.message ? 
error.message : error.toString(); + throw new tf.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + } + + static _openBundle(context, host) { + return tf.Metadata.open(host).then((metadata) => { + const identifier = context.identifier; + return tf.TensorBundle.open(context.buffer, identifier, context, host).then((bundle) => { + return new tf.Model(metadata, null, 'TensorFlow Tensor Bundle v' + bundle.format.toString(), null, bundle); + }).catch((error) => { + host.exception(error, false); + const message = error && error.message ? error.message : error.toString(); + throw new tf.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + }); + }); + } +}; + +tf.Model = class { + + constructor(metadata, model, format, producer, bundle) { + this._format = format; + this._producer = producer || ''; + this._graphs = []; + if (model) { + for (let i = 0; i < model.meta_graphs.length; i++) { + const metaGraph = model.meta_graphs[i]; + let name = null; + if (metaGraph.any_info) { + name = metaGraph.any_info.toString(); + } + else if (model.meta_graphs.length > 1) { + name = i.toString(); + } + else { + name = '-'; + } + this._graphs.push(new tf.Graph(metadata, metaGraph, name, bundle)); + } + // Recursively add all subgraphs. 
+ let visited_graph = []; + let pending_graphs = [...this._graphs]; + while (pending_graphs.length > 0) { + let g = pending_graphs.shift(); + visited_graph.push(g); + for (let f of g.functions) { + pending_graphs.push(f); + } + } + this._graphs = visited_graph; + } + else { + this._graphs.push(new tf.Graph(metadata, null, '', bundle)); + } + + } + + get format() { + return this._format; + } + + get producer() { + return this._producer; + } + + get description() { + return null; + } + + get graphs() { + return this._graphs; + } +}; + +tf.Graph = class { + + constructor(metadata, metaGraph, name, bundle) { + this._metadata = metadata; + this._version = null; + this._name = name; + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._functions = []; + + if (metaGraph && metaGraph.graph_def) { + this._metadata = new tf.GraphMetadata(metadata, metaGraph.meta_info_def); + const graph = metaGraph.graph_def; + if (graph.versions) { + this._version = 'v' + graph.versions.producer.toString(); + } + else if (graph.version) { + this._version = graph.version; + } + else if (metaGraph.meta_info_def && metaGraph.meta_info_def.tensorflow_version) { + this._version = metaGraph.meta_info_def.tensorflow_version; + } + if (metaGraph.meta_info_def && metaGraph.meta_info_def.tags) { + this._tags = metaGraph.meta_info_def.tags.join(', '); + } + const nodes = graph.node; + if (nodes) { + let nodeMap = {}; + this._namespaces = {}; + for (const node of nodes) { + const nodeName = node.name; + nodeMap[nodeName] = node; + if (node.op != 'Const') { + let lastIndex = nodeName.lastIndexOf('/'); + if (lastIndex != -1) { + let namespace = nodeName.substring(0, lastIndex); + this._namespaces[namespace] = true; + } + } + node.output = []; + } + for (const node of nodes) { + const inputs = node.input; + node.input = []; + node.controlDependencies = []; + for (const input of inputs) { + let split = input.split(':', 2); + let inputName = split[0]; + let outputIndex = split.length == 1 ? 
0 : parseInt(split[1]); + let outputName = inputName.startsWith('^') ? inputName.substring(1) : inputName; + let outputNode = nodeMap[outputName]; + outputName = outputIndex == 0 ? outputName : outputName + ':' + outputIndex.toString(); + if (inputName.startsWith('^')) { + node.controlDependencies.push(outputName); + } + else { + node.input.push(outputName); + } + if (outputNode) { + for (let j = outputNode.output.length; j <= outputIndex; j++) { + outputNode.output.push(''); + } + outputNode.output[outputIndex] = outputName; + } + } + } + this._nodeOutputCountMap = {}; + for (const node of nodes) { + for (const input of node.input) { + this._nodeOutputCountMap[input] = (this._nodeOutputCountMap[input] || 0) + 1; + } + for (const controlDependency of node.controlDependencies) { + this._nodeOutputCountMap[controlDependency] = (this._nodeOutputCountMap[controlDependency] || 0) + 1; + } + } + let initializers = {}; + for (const node of nodes) { + if (node.op == 'Const' && node.input.length == 0 && node.controlDependencies.length == 0 && this._checkSingleOutput(node)) { + let value = node.attr.value; + if (value && Object.prototype.hasOwnProperty.call(value, 'tensor')) { + let output = node.output[0]; + if (output) { + initializers[output] = new tf.Tensor(value.tensor, node.name, 'Constant'); + } + } + } + } + for (const node of nodes) { + if (node.op == 'Identity' && node.input.length == 1 && node.controlDependencies.length == 0 && this._checkSingleOutput(node)) { + let initializer_name = node.input[0]; + let initializer = initializers[initializer_name]; + if (initializer) { + initializers[initializer_name] = "-"; + initializer.kind = 'Identity Constant'; + initializers[node.output[0]] = initializer; + } + } + } + let inputMap = {}; + for (const node of nodes) { + if (node.op == 'Placeholder' && node.input.length == 0 && node.controlDependencies.length == 0 && node.output.length == 1) { + const dtype = node.attr.dtype; + const shape = node.attr.shape; + if (dtype && 
dtype.type && shape && shape.shape) { + const type = new tf.TensorType(dtype.type, shape.shape); + const argument = new tf.Argument(node.output[0], type, null); + inputMap[node.output[0]] = new tf.Parameter(node.name, [ argument ]); + } + } + } + this._inputs = Object.keys(inputMap).map((key) => { + return inputMap[key]; + }); + for (const node of nodes) { + let id = node.name; + if (!initializers[id] && !inputMap[id] /* && node.op != 'NoOp' */) { + this._nodes.push(new tf.Node(this, node, node.op, node.name, initializers, null)); + } + } + } + + if (graph.library) { + const funcs = graph.library.function; + for (const func of funcs) { + this._functions.push(new tf.Function(this, func, this._metadata)); + } + } + } + else if (bundle) { + let nodeNames = []; + let nodeMap = new Map(); + for (const tensor of bundle.tensors) { + let parts = tensor.name.split('/'); + if (bundle.format === 2) { + if (tensor.name === '_CHECKPOINTABLE_OBJECT_GRAPH' || + tensor.name.startsWith('optimizer/') || + tensor.name.startsWith('keras_api/metrics/') || + tensor.name.endsWith('/ExponentialMovingAverage') || + tensor.name.indexOf('.OPTIMIZER_SLOT') !== -1) { + continue; + } + if (tensor.name.endsWith('/.ATTRIBUTES/VARIABLE_VALUE')) { + parts.pop(); + parts.pop(); + } + } + let tensorName = parts.pop(); + let nodeName = parts.join('/'); + if (!nodeMap.has(nodeName)) { + nodeNames.push(nodeName); + nodeMap.set(nodeName, []); + } + nodeMap.get(nodeName).push({ name: tensorName, value: tensor }); + } + for (const nodeName of nodeNames) { + this._nodes.push(new tf.Node(this, null, 'Node', nodeName, null, nodeMap.get(nodeName))); + } + } + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get tags() { + return this._tags; + } + + get groups() { + return false; + // TODO return true; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + get metadata() { + 
return this._metadata; + } + + get namespaces() { + return this._namespaces; + } + + get functions() { + return this._functions; + } + + _checkSingleOutput(node) { + if (node.output.length != 1) { + return false; + } + const output = node.output[0]; + const count = this._nodeOutputCountMap[output]; + if (count != 1) { + return false; + } + return true; + } +}; + +tf.Parameter = class { + + constructor(name, args) { + this._name = name; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return true; + } + + get arguments() { + return this._arguments; + } +}; + +tf.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new tf.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type || null; + this._initializer = initializer || null; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +tf.Function = class { + + constructor(graph, func, metadata) { + this._name = func.signature.name; + this._version = null; + this._tags = null; + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._metadata = metadata; + this._namespaces = {}; + this._functions = []; + + let inputs = func.signature.input_arg; + if (inputs) { + for (const input of inputs) { + let inputArgument = new tf.Argument(input.name, new tf.TensorType(input.type, null), null); + this._inputs.push(new tf.Parameter(input.name, [ inputArgument ])); + } + } + + let ret_map = {}; + for (const key of Object.keys(func.ret)) { + const v = func.ret[key].split(':', 2); + ret_map[key] = v[0]; + } + + let out_args_reverse_map = {}; + let outputs = func.signature.output_arg; + if (outputs) { + for (const output of outputs) { + let name = ret_map[output.name]; + this._outputs.push(new 
tf.Parameter(output.name, [ + new tf.Argument(name, new tf.TensorType(output.type, null), null) + ])); + out_args_reverse_map[name] = output.name; + } + } + + let nodes = func.node_def; + if (nodes) { + let nodeMap = {}; + + for (const node of nodes) { + let nodeName = node.name; + nodeMap[nodeName] = node; + if (node.op != 'Const') { + let lastIndex = nodeName.lastIndexOf('/'); + if (lastIndex != -1) { + let namespace = nodeName.substring(0, lastIndex); + this._namespaces[namespace] = true; + } + } + node.output = []; + } + for (const node of nodes) { + let inputs = node.input; + node.input = []; + node.controlDependencies = []; + for (const input of inputs) { + let split = input.split(':', 3); + let inputName = split[0]; + let outputIndex = split.length == 1 ? 0 : parseInt(split[split.length - 1]); + let outputName = inputName.startsWith('^') ? inputName.substring(1) : inputName; + let outputNode = nodeMap[outputName]; + outputName = outputIndex == 0 ? outputName : outputName + ':' + outputIndex.toString(); + if (inputName.startsWith('^')) { + node.controlDependencies.push(outputName); + } + else { + node.input.push(outputName); + } + if (outputNode) { + for (let j = outputNode.output.length; j <= outputIndex; j++) { + outputNode.output.push(''); + } + outputNode.output[outputIndex] = outputName; + } + } + + if (out_args_reverse_map[node.name]) { + node.output.push(node.name); + } + } + + let nodeOutputCountMap = {}; + for (const node of nodes) { + for (const input of node.input) { + nodeOutputCountMap[input] = (nodeOutputCountMap[input] || 0) + 1; + } + for (const controlDependency of node.controlDependencies) { + nodeOutputCountMap[controlDependency] = (nodeOutputCountMap[controlDependency] || 0) + 1; + } + } + + let initializers = {}; + for (const node of nodes) { + if (node.op == 'Const' && node.input.length == 0 && node.controlDependencies.length == 0 && tf.Function._checkSingleOutput(node, nodeOutputCountMap)) { + let value = node.attr.value; + if (value && 
Object.prototype.hasOwnProperty.call(value, 'tensor')) { + let output = node.output[0]; + if (output) { + initializers[output] = new tf.Tensor(value.tensor, node.name, 'Constant'); + } + } + } + } + for (const node of nodes) { + if (node.op == 'Identity' && node.input.length == 1 && node.controlDependencies.length == 0 && tf.Function._checkSingleOutput(node, nodeOutputCountMap)) { + let initializer_name = node.input[0]; + let initializer = initializers[initializer_name]; + if (initializer) { + initializers[initializer_name] = "-"; + initializer.kind = 'Identity Constant'; + initializers[node.output[0]] = initializer; + } + } + } + + for (const node of nodes) { + if (!initializers[node.name]) + this._nodes.push(new tf.Node(this, node, node.op, node.name, initializers, null)); + } + } + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get tags() { + return this._tags; + } + + get groups() { + return false; + // TODO return true; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + get metadata() { + return this._metadata; + } + + get namespaces() { + return this._namespaces; + } + + get functions() { + return this._functions; + } + + static _checkSingleOutput(node, nodeOutputCountMap) { + if (node.output.length != 1) { + return false; + } + let output = node.output[0]; + let count = nodeOutputCountMap[output]; + if (count != 1) { + return false; + } + return true; + } +}; + +tf.Node = class { + + constructor(graph, node, op, name, initializers, tensors) { + this._graph = graph; + this._type = op; + this._name = name; + this._attributes = []; + this._inputs = []; + this._outputs = []; + if (node) { + if (Object.prototype.hasOwnProperty.call(node, 'device')) { + this._device = node.device; + } + const metadata = graph.metadata; + if (node.attr) { + for (const attributeName of Object.keys(node.attr)) { + const schema = 
metadata.attribute(this._type, attributeName); + const visible = metadata.getAttributeVisibleMap(this._type)[attributeName] ? false : true; + this._attributes.push(new tf.Attribute(schema, attributeName, node.attr[attributeName], visible)); + } + } + const schema = metadata.type(this._type); + let inputIndex = 0; + let inputs = node.input.filter(input => !input.startsWith('^')); + if (schema && schema.inputs) { + for (const input of schema.inputs) { + let inputCount = 1; + if (input.numberAttr) { + let inputNumber = node.attr[input.numberAttr]; + if (inputNumber && inputNumber.i) { + inputCount = inputNumber.i; + } + } + else if (input.typeListAttr) { + let inputTypeListAttr = node.attr[input.typeListAttr]; + if (inputTypeListAttr && inputTypeListAttr.list && inputTypeListAttr.list.type) { + inputCount = inputTypeListAttr.list.type.length; + } + } + let inputArguments = inputs.slice(inputIndex, inputIndex + inputCount).map((id) => { + return new tf.Argument(id, null, initializers[id]); + }); + this._inputs.push(new tf.Parameter(input.name, inputArguments)); + inputIndex += inputCount; + } + } + this._inputs = this._inputs.concat(inputs.slice(inputIndex).map((input, index) => { + return new tf.Parameter((inputIndex + index).toString(), [ + new tf.Argument(input, null, initializers[input]) + ]); + })); + let outputIndex = 0; + let outputs = node.output; + if (schema && schema.outputs) { + for (const output of schema.outputs) { + let outputCount = 1; + if (output.numberAttr) { + let outputNumber = node.attr[output.numberAttr]; + if (outputNumber && outputNumber.i) { + outputCount = outputNumber.i; + } + } + else if (output.typeListAttr) { + let outputTypeListAttr = node.attr[output.typeListAttr]; + if (outputTypeListAttr && outputTypeListAttr.list && outputTypeListAttr.list.type) { + outputCount = outputTypeListAttr.list.type.length; + } + } + let outputArguments = outputs.slice(outputIndex, outputIndex + outputCount).map((id) => { + return new tf.Argument(id, null, 
null); + }); + this._outputs.push(new tf.Parameter(output.name, outputArguments)); + outputIndex += outputCount; + } + } + this._outputs = this._outputs.concat(outputs.slice(outputIndex).map((output, index) => { + return new tf.Parameter((outputIndex + index).toString(), [ + new tf.Argument(output, null, null) + ]); + })); + this._controlDependencies = node.controlDependencies; + } + else if (tensors) { + for (const tensor of tensors) { + this._inputs.push(new tf.Parameter(tensor.name, [ + new tf.Argument(tensor.value.name, null, tensor.value) + ])); + } + } + } + + get type() { + return this._type; + } + + get name() { + return this._name; + } + + get device() { + return this._device || null; + } + + get group() { + const name = this._name; + if (this._graph.namespaces[name]) { + return name; + } + let lastIndex = name.lastIndexOf('/'); + if (lastIndex != -1) { + let namespace = name.substring(0, lastIndex); + if (this._graph.namespaces[namespace]) { + return namespace; + } + } + return ''; + } + + get description() { + return ''; + } + + get domain() { + return null; + } + + get metadata() { + return this._graph.metadata.type(this.type); + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get controlDependencies() { + return this._controlDependencies; + } + + get attributes() { + return this._attributes; + } +}; + +tf.Attribute = class { + + constructor(schema, name, value, visible) { + this._name = name; + this._value = null; + this._type = null; + if (Object.prototype.hasOwnProperty.call(value, 'tensor')) { + this._type = 'tensor'; + this._value = new tf.Tensor(value.tensor); + } + else if (schema && schema.type) { + this._type = schema.type; + } + if (Object.prototype.hasOwnProperty.call(value, 'type')) { + this._type = 'type'; + this._value = tf.Tensor.formatDataType(value.type); + } + else if (Object.prototype.hasOwnProperty.call(value, 'i')) { + this._value = value.i; + } + else if 
(Object.prototype.hasOwnProperty.call(value, 'f')) { + this._value = value.f; + } + else if (Object.prototype.hasOwnProperty.call(value, 'b')) { + this._value = value.b; + } + else if (Object.prototype.hasOwnProperty.call(value, 'shape')) { + this._type = 'shape'; + this._value = new tf.TensorShape(value.shape); + } + else if (Object.prototype.hasOwnProperty.call(value, 's')) { + if (typeof value.s === 'string') { + this._value = value.s; + } + else if (ArrayBuffer.isView(value.s)) { + this._value = (value.s.length === 0) ? '' : (value.s.filter(c => c <= 32 && c >= 128).length === 0) ? tf.Metadata.textDecoder.decode(value.s) : Array.from(value.s); + } + else { + this._value = value.s; + } + } + else if (Object.prototype.hasOwnProperty.call(value, 'list')) { + let list = value.list; + if (list.s && list.s.length > 0) { + this._value = list.s.map((s) => { + if (typeof s === 'string') { + return s; + } + else if (ArrayBuffer.isView(s)) { + return (s.length === 0) ? '' : (s.filter(c => c <= 32 && c >= 128).length === 0) ? 
tf.Metadata.textDecoder.decode(s) : Array.from(s); + } + else { + return s; + } + }); + } + else if (list.i && list.i.length > 0) { + this._value = list.i; + } + else if (list.f && list.f.length > 0) { + this._value = list.f; + } + else if (list.type && list.type.length > 0) { + this._type = 'type[]'; + this._value = list.type.map((type) => tf.Tensor.formatDataType(type)); + } + else if (list.shape && list.shape.length > 0) { + this._type = 'shape[]'; + this._value = list.shape.map((shape) => new tf.TensorShape(shape)); + } + else { + this._value = []; + } + } + else if (Object.prototype.hasOwnProperty.call(value, 'func')) { + const func = value.func; + this._type = 'function'; + this._value = func.name; + } + + if (schema) { + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + if (!Array.isArray(this._value) || Array.isArray(schema.default) || this._value.length === schema.default.length) { + let value = this._value; + let defaultValue = schema.default; + if (this._type === 'float32') { + let temp = new Float32Array(1); + temp[0] = value; + value = temp[0]; + temp[0] = defaultValue; + defaultValue = temp[0]; + } + const valueText = tf.GraphMetadata._formatAttributeValue(value); + const defaultValueText = tf.GraphMetadata._formatAttributeValue(defaultValue); + if (JSON.stringify(valueText) == JSON.stringify(defaultValueText)) { + this._visible = false; + } + } + } + } + if (name == '_output_shapes') { + this._visible = false; + this._type = 'shape[]'; + } + if (name == '_class') { + this._visible = false; + } + if (visible === false) { + this._visible = false; + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? 
false : true; + } +}; + +tf.Tensor = class { + + constructor(tensor, name, kind) { + this._type = new tf.TensorType(tensor.dtype, tensor.tensor_shape || tensor.tensorShape); + this._name = name; + this._kind = kind || null; + this._tensor = tensor; + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get kind() { + return this._kind; + } + + set kind(value) { + this._kind = value; + } + + get state() { + return this._context().state; + } + + get value() { + let context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + let context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + let context = {}; + context.state = null; + context.index = 0; + context.count = 0; + context.size = 1; + + if (!this._tensor.dtype) { + context.state = 'Tensor has no data type.'; + return context; + } + const shape = this._tensor.tensor_shape || this._tensor.tensorShape; + if (!shape || !shape.dim) { + context.state = 'Tensor has no dimensions.'; + return context; + } + + for (const dim of shape.dim) { + context.size = context.size * (dim.size ? 
dim.size : 0); + } + + switch (this._tensor.dtype) { + case 'DT_FLOAT': + case tf.proto.DataType.DT_FLOAT: + if (this._tensor.tensor_content && this._tensor.tensor_content.length > 0) { + context.rawData = new DataView(this._tensor.tensor_content.buffer, this._tensor.tensor_content.byteOffset, this._tensor.tensor_content.byteLength); + } + else if (this._tensor.float_val && this._tensor.float_val.length == context.size) { + context.data = this._tensor.float_val; + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case tf.proto.DataType.DT_QINT8: + case tf.proto.DataType.DT_QUINT8: + if (this._tensor.tensor_content && this._tensor.tensor_content.length > 0) { + context.rawData = new DataView(this._tensor.tensor_content.buffer, this._tensor.tensor_content.byteOffset, this._tensor.tensor_content.byteLength); + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case tf.proto.DataType.DT_INT32: + case tf.proto.DataType.DT_UINT32: + if (this._tensor.tensor_content && this._tensor.tensor_content.length > 0) { + context.rawData = new DataView(this._tensor.tensor_content.buffer, this._tensor.tensor_content.byteOffset, this._tensor.tensor_content.byteLength); + } + else if (this._tensor.int_val && this._tensor.int_val.length == context.size) { + context.data = this._tensor.int_val; + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case tf.proto.DataType.DT_STRING: + if (this._tensor.tensor_content && this._tensor.tensor_content.length > 0) { + context.state = 'Tensor data type is not implemented.'; + } + else if (this._tensor.string_val && this._tensor.string_val.length == context.size) { + context.data = this._tensor.string_val; + } + else { + context.state = 'Tensor data is empty.'; + } + break; + case tf.proto.DataType.DT_BOOL: + context.state = "Tensor data type 'bool' is not implemented."; + break; + default: + context.state = "Tensor data type '" + this._tensor.dtype + "' is not implemented."; + break; + } + + 
context.shape = shape.dim.map((dim) => dim.size); + return context; + } + + _decode(context, dimension) { + let shape = context.shape; + if (shape.length == 0) { + shape = [ 1 ]; + } + let results = []; + const size = shape[dimension]; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + if (context.data) { + results.push(this._decodeDataValue(context)); + context.count++; + } + else { + if (context.rawData) { + switch (this._tensor.dtype) { + case tf.proto.DataType.DT_FLOAT: + results.push(context.rawData.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case tf.proto.DataType.DT_INT32: + results.push(context.rawData.getInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case tf.proto.DataType.DT_UINT32: + results.push(context.rawData.getUInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case tf.proto.DataType.DT_QINT8: + results.push(context.rawData.getInt8(context.index, true)); + context.index += 1; + context.count++; + break; + case tf.proto.DataType.DT_QUINT8: + results.push(context.rawData.getUint8(context.index, true)); + context.index += 1; + context.count++; + break; + } + } + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1, shape)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } + + _decodeDataValue(context) { + const value = context.data[context.index++]; + if (this._tensor.dtype == tf.proto.DataType.DT_STRING) { + return tf.Metadata.textDecoder.decode(value); + } + return value; + } + + static formatDataType(type) { + if (!tf.Tensor.dataType) { + tf.Tensor.dataType = {}; + for (let key of Object.keys(tf.proto.DataType)) { + const value = tf.proto.DataType[key]; + key = 
key.startsWith('DT_') ? key.substring(3) : key; + tf.Tensor.dataType[value] = key.toLowerCase(); + } + tf.Tensor.dataType[tf.proto.DataType.DT_HALF] = 'float16'; + tf.Tensor.dataType[tf.proto.DataType.DT_FLOAT] = 'float32'; + tf.Tensor.dataType[tf.proto.DataType.DT_DOUBLE] = 'float64'; + tf.Tensor.dataType['DT_FLOAT'] = 'float32'; + } + return tf.Tensor.dataType[type] || '?'; + } +}; + +tf.TensorType = class { + + constructor(dtype, shape) { + this._dtype = dtype; + this._shape = new tf.TensorShape(shape); + } + + get dataType() { + return this._dtype ? tf.Tensor.formatDataType(this._dtype) : '?'; + } + + get shape() { + return this._shape; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +tf.TensorShape = class { + + constructor(shape) { + this._shape = shape; + } + + get dimensions() { + if (this._shape && this._shape.dim) { + if (this._shape.unknown_rank) { + return null; + } + if (this._shape.dim.length == 0) { + return []; + } + if (this._shape.dim.length == 1 && !this._shape.dim[0].size) { + return [ 0 ]; + } + return this._shape.dim.map((dim) => (dim.size && dim.size != -1) ? dim.size : '?'); + } + return null; + } + + toString() { + if (this._shape && this._shape.dim) { + if (this._shape.unknown_rank) { + return '[-]'; + } + if (this._shape.dim.length == 0) { + return ''; + } + if (this._shape.dim.length == 1 && !this._shape.dim[0].size) { + return '[0]'; + } + return '[' + this._shape.dim.map((dim) => (dim.size && dim.size != -1) ? dim.size.toString() : '?').join(',') + ']'; + } + return '?'; + } +}; + +tf.TensorBundle = class { + + static open(buffer, identifier, context, host) { + const format = !identifier.toLowerCase().endsWith('.index') ? 
1 : 2; + if (buffer.length <= 48) { + throw new tf.Error('Invalid index file size.'); + } + let reader = new tf.TensorBundle.BinaryReader(buffer, host); + reader.seek(-8); + const signature = [ 0x57, 0xfb, 0x80, 0x8b, 0x24, 0x75, 0x47, 0xdb ]; + if (!reader.bytes(8).every((value, index) => value === signature[index])) { + throw new tf.Error('Invalid table signature.'); + } + reader.seek(-48); + reader.varint64(); // metaindex offset + reader.varint64(); // metaindex size + const indexOffset = reader.varint64(); + const indexSize = reader.varint64(); + reader.seek(indexOffset); + let indexData = reader.bytes(indexSize); + let indexCompression = reader.byte(); + if (indexCompression !== 0) { // kNoCompression + throw new tf.Error("Unsupported block compression '" + indexCompression + "'."); + } + let indexReader = new tf.TensorBundle.BinaryReader(indexData); + indexReader.seek(-4); + const numRestarts = indexReader.int32(); + indexReader.seek(-4 - (4 * numRestarts)); + let restartOffsets = []; + for (let i = 0; i < numRestarts; i++) { + restartOffsets.push(indexReader.int32()); + } + const textDecoder = new TextDecoder(); + let entries = new Map(); + for (let i = 0; i < numRestarts; i++) { + indexReader.seek(restartOffsets[i]); + indexReader.varint32(); // index shared size + const indexNonSharedSize = indexReader.varint32(); + const indexValueSize = indexReader.varint32(); + indexReader.skip(indexNonSharedSize); + let indexValueReader = new tf.TensorBundle.BinaryReader(indexReader.bytes(indexValueSize)); + reader.seek(indexValueReader.varint64()); + let blockReader = new tf.TensorBundle.BinaryReader(reader.bytes(indexValueReader.varint64())); + let key = ''; + while (!blockReader.end()) { + const sharedSize = blockReader.varint32(); + const nonSharedSize = blockReader.varint32(); + const valueSize = blockReader.varint32(); + if (sharedSize === 0 && nonSharedSize === 0 && valueSize === 0) { + break; + } + key = key.substring(0, sharedSize); + key = key + 
textDecoder.decode(blockReader.bytes(nonSharedSize)); + const value = blockReader.bytes(valueSize); + entries.set(key, value); + } + } + if (!entries.has('')) { + throw new tf.Error('Bundle header not available.'); + } + if (format === 1) { + return Promise.resolve(new tf.TensorBundle(format, entries, [])); + } + const header = tf.proto.BundleHeaderProto.decode(entries.get('')); + const numShards = header.num_shards; + let promises = []; + for (let i = 0; i < numShards; i++) { + const shardIndex = ('0000' + i).slice(-5); + const shardCount = ('0000' + numShards).slice(-5); + const filename = identifier.split('.'); + filename.pop(); + const basename = filename.join('.'); + const name = basename + '.data-' + shardIndex + '-of-' + shardCount; + promises.push(context.request(name, null)); + } + return Promise.all(promises).then((shards) => { + return new tf.TensorBundle(format, entries, shards); + }).catch((error) => { + host.exception(error, false); + return new tf.TensorBundle(format, entries, null); + }); + } + + constructor(format, entries, shards) { + this._format = format; + this._tensors = []; + switch (format) { + case 1: { + const header = tf.proto.SavedTensorSlices.decode(entries.get('')); + let data = new Map(); + for (const pair of entries) { + if (pair[0] !== '' && pair[0] !== 'global_step') { + const slices = tf.proto.SavedTensorSlices.decode(pair[1]); + const name = slices.data.name; + const tensor = slices.data.data; + if (!data.has(name)) { + if (tensor.tensor_content && tensor.tensor_content.length > 0) { + data.set(name, { key: 'tensor_content', value: tensor.tensor_content }); + } + else { + const keys = Object.keys(tensor).filter((key) => key.endsWith('_val') && tensor[key] && tensor[key].length > 0); + data.set(name, keys.length == 1 ? 
{ key: keys[0], value: tensor[keys[0]] } : null); + } + } + else { + let item = data.get(name); + if (item !== null) { + if (tensor[item.key] && tensor[item.key].length > 0) { + item.value = item.value.concat(tensor[item.key]); + } + else { + data.set(name, null); + } + } + } + } + } + for (const meta of header.meta.tensor) { + if (meta.name !== 'global_step') { + let tensor = new tf.proto.TensorProto(); + tensor.dtype = meta.type; + tensor.tensor_shape = meta.shape; + const item = data.get(meta.name); + if (item) { + tensor[item.key] = item.value; + } + this._tensors.push(new tf.Tensor(tensor, meta.name, null)); + } + } + break; + } + case 2: { + entries.forEach((value, name) => { + if (name !== '') { + const entry = tf.proto.BundleEntryProto.decode(value); + let tensor = new tf.proto.TensorProto(); + tensor.dtype = entry.dtype; + tensor.tensor_shape = entry.shape; + const offset = (entry.offset instanceof long.Long) ? entry.offset.toNumber() : entry.offset; + const size = (entry.size instanceof long.Long) ? entry.size.toNumber() : entry.size; + if (shards) { + tensor.tensor_content = shards[entry.shard_id].slice(offset, offset + size); + } + this._tensors.push(new tf.Tensor(tensor, name, null)); + } + }); + break; + } + } + } + + get format() { + return this._format; + } + + get tensors() { + return this._tensors; + } +}; + +tf.TensorBundle.BinaryReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength); + this._position = 0; + } + + seek(position) { + this._position = position >= 0 ? position : this._buffer.length + position; + if (this._position > this._buffer.length) { + throw new tf.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. 
Unexpected end of file.'); + } + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new tf.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.'); + } + } + + end() { + return this._position >= this._buffer.length; + } + + bytes(length) { + const position = this._position; + this.skip(length); + return this._buffer.subarray(position, this._position); + } + + byte() { + const position = this._position; + this.skip(1); + return this._dataView.getUint8(position); + } + + int32() { + const position = this._position; + this.skip(4); + return this._dataView.getInt32(position, true); + } + + varint32() { + return this.varint64(); + } + + varint64() { + let result = 0; + for (let shift = 0; shift <= 63; shift += 7) { + let byte = this.byte(); + if (byte & 128) { + result |= (byte & 127) << shift; + } + else { + result |= byte << shift; + break; + } + } + return result; + } +}; + +tf.GraphMetadata = class { + + constructor(metadata) { + this._metadata = metadata; + this._map = {}; + this._attributeCache = {}; + } + + type(operator) { + var schema = this._metadata.type(operator); + if (!schema) { + schema = this._map[operator]; + } + return schema; + } + + attribute(operator, name) { + let map = this._attributeCache[operator]; + if (!map) { + map = {}; + const schema = this.type(operator); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[operator] = map; + } + return map[name] || null; + } + + getAttributeVisibleMap(operator) { + const schema = this.type(operator); + if (schema) { + let map = schema.__visisbleAttributeMap__; + if (!map) { + map = {}; + if (schema.inputs) { + for (const input of schema.inputs) { + if (input.typeAttr) { + map[input.typeAttr] = true; + } + else if (input.typeListAttr) { + 
map[input.typeListAttr] = true; + } + if (input.numberAttr) { + map[input.numberAttr] = true; + } + } + } + if (schema.outputs) { + for (const output of schema.outputs) { + if (output.typeAttr) { + map[output.typeAttr] = true; + } + else if (output.typeListAttr) { + map[output.typeListAttr] = true; + } + if (output.numberAttr) { + map[output.numberAttr] = true; + } + } + } + schema.__visisbleAttributeMap__ = map; + } + return map; + } + return {}; + } + + static _formatAttributeValue(value) { + if (value == null) { + return null; + } + if (value && long.Long.isLong(value)) { + value = value.toNumber(); + } + if (Array.isArray(value)) { + return value.map((item) => tf.GraphMetadata._formatAttributeValue(item)); + } + if (value === Object(value)) { + switch (value.type) { + case 'type': + return tf.Tensor.formatDataType(value.value); + case 'shape': + return value.value; + case 'tensor': + return value.value; + } + } + if (typeof value === 'string') { + return '"' + value + '"'; + } + return value.toString(); + } +}; + +tf.Metadata = class { + + static open(host) { + tf.Metadata.textDecoder = tf.Metadata.textDecoder || new TextDecoder('utf-8'); + if (tf.Metadata._metadata) { + return Promise.resolve(tf.Metadata._metadata); + } + return host.request(null, 'tf-metadata.json', 'utf-8').then((data) => { + tf.Metadata._metadata = new tf.Metadata(data); + return tf.Metadata._metadata; + }).catch(() => { + tf.Metadata._metadata = new tf.Metadata(null); + return tf.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + if (data) { + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + if (item.name && item.schema) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + } + } + + type(operator) { + return this._map[operator]; + } +}; + +tf.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading TensorFlow model.'; + } +}; + +if (typeof 
module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = tf.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/tflite-metadata.json b/frontend/packages/core/public/netron/tflite-metadata.json new file mode 100644 index 00000000..d235bfb0 --- /dev/null +++ b/frontend/packages/core/public/netron/tflite-metadata.json @@ -0,0 +1,750 @@ +[ + { + "name": "Conv2D", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input" }, + { "name": "weights" }, + { "name": "bias" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "dilation_w_factor", "type": "int32", "default": 1 }, + { "name": "dilation_h_factor", "type": "int32", "default": 1 } + ] + } + }, + { + "name": "LSTM", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input", "type": "T", "description": "Input tensor." 
}, + { "name": "input_input_weights", "type": "T", "option": "optional", "description": "Input to input weights tensor.", "visible": false }, + { "name": "input_forget_weights", "type": "T", "description": "Input to forget weights tensor.", "visible": false }, + { "name": "input_cell_weights", "type": "T", "description": "Input to cell weights tensor.", "visible": false }, + { "name": "input_output_weights", "type": "T", "description": "Input to output weights tensor.", "visible": false }, + { "name": "recurrent_input_weights", "type": "T", "option": "optional", "description": "Recurrent to input weights tensor.", "visible": false }, + { "name": "recurrent_forget_weights", "type": "T", "description": "Recurrent to forget weights tensor.", "visible": false }, + { "name": "recurrent_cell_weights", "type": "T", "description": "Recurrent to cell weights tensor.", "visible": false }, + { "name": "recurrent_output_weights", "type": "T", "description": "Recurrent to output weights tensor.", "visible": false }, + { "name": "cell_input_weights", "type": "T", "option": "optional", "description": "Cell to input weights tensor.", "visible": false }, + { "name": "cell_forget_weights", "type": "T", "option": "optional", "description": "Cell to forget weights tensor.", "visible": false }, + { "name": "cell_output_weights", "type": "T", "option": "optional", "description": "Cell to output weights tensor.", "visible": false }, + { "name": "input_bias", "type": "T", "option": "optional", "description": "Input gate bias tensor.", "visible": false }, + { "name": "forget_bias", "type": "T", "description": "Forget gate bias tensor.", "visible": false }, + { "name": "cell_bias", "type": "T", "description": "Cell gate bias tensor.", "visible": false }, + { "name": "output_bias", "type": "T", "description": "Output gate bias tensor.", "visible": false }, + { "name": "projection_weights", "type": "T", "option": "optional", "description": "Projection weights tensor.", "visible": false }, + { 
"name": "projection_bias", "type": "T", "option": "optional", "description": "Projection bias tensor.", "visible": false } + ], + "outputs": [ + { "name": "scratch", "type": "T" }, + { "name": "output_state", "type": "T" }, + { "name": "cell_state", "type": "T" }, + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "cell_clip", "type": "float32", "default": 0.0 }, + { "name": "proj_clip", "type": "float32", "default": 0.0 }, + { "name": "kernel_type", "type": "LSTMKernelType", "default": "FULL" } + ] + } + }, + { + "name": "RNN", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "X", "type": "T" }, + { "name": "W", "type": "T" }, + { "name": "R", "type": "T" }, + { "name": "b", "type": "T" } + ], + "outputs": [ + { "name": "hidden", "type": "T" }, + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + } + }, + { + "name": "FullyConnected", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "weights", "type": "T" }, + { "name": "bias", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "weights_format", "type": "FullyConnectedOptionsWeightsFormat", "default": "DEFAULT" }, + { "name": "keep_num_dims", "type": "boolean" } + ] + } + }, + { + "name": "DepthwiseConv2D", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "weights", "type": "T" }, + { "name": "bias", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", 
"default": "NONE" }, + { "name": "stride_w", "type": "int32", "default": 1 }, + { "name": "stride_h", "type": "int32", "default": 1 }, + { "name": "depth_multiplier", "type": "int32", "default": 1 }, + { "name": "dilation_w_factor", "type": "int32", "default": 1 }, + { "name": "dilation_h_factor", "type": "int32", "default": 1 } + ] + } + }, + { + "name": "AveragePool2D", + "schema": { + "category": "Pool", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32" }, + { "name": "stride_h", "type": "int32" }, + { "name": "filter_width", "type": "int32" }, + { "name": "filter_height", "type": "int32" } + ] + } + }, + { + "name": "Softmax", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + } + }, + { + "name": "LogSoftmax", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + } + }, + { + "name": "Relu", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + } + }, + { + "name": "Relu6", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + } + }, + { + "name": "Prelu", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "slope", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ] + } + }, + { + "name": "Tanh", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": 
"output", "type": "T" } + ] + } + }, + { + "name": "Reshape", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "new_shape", "type": "shape"} + ], + "inputs": [ + { "name": "data", "type": "T" }, + { "name": "shape", "type": "T" } + ], + "outputs": [ + { "name": "reshaped", "type": "T" } + ] + } + }, + { + "name": "MaxPool2D", + "schema": { + "category": "Pool", + "inputs": [ + { "name": "input", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "padding", "type": "Padding", "default": "SAME" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" }, + { "name": "stride_w", "type": "int32" }, + { "name": "stride_h", "type": "int32" }, + { "name": "filter_width", "type": "int32" }, + { "name": "filter_height", "type": "int32" } + ] + } + }, + { + "name": "LSHProjection", + "schema": { + "inputs": [ + { "name": "hash" }, + { "name": "input" }, + { "name": "weight" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "type", "type": "LSHProjectionType" } + ] + } + }, + { + "name": "Normalize", + "schema": { + "category": "Normalization", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "LocalResponseNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "radius", "type": "int32", "default": 5 }, + { "name": "bias", "type": "float32", "default": 1 }, + { "name": "alpha", "type": "float32", "default": 1 }, + { "name": "beta", "type": "float32", "default": 0.5 } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Predict", + "schema": { + "inputs": [ + { "name": "hashes" }, + { "name": "keys" }, + { "name": "labels" }, + { "name": "weights" } + ], + "outputs": [ + { "name": "label" }, + { "name": "weight" } + ] + } + }, + { + "name": "HashtableLookup", + "schema": { + "inputs": [ + { 
"name": "key" }, + { "name": "keys" }, + { "name": "values" } + ], + "outputs": [ + { "name": "value" }, + { "name": "hits" } + ] + } + }, + { + "name": "ExtractFeatures", + "schema": { + "inputs": [ + { "name": "ngrams" } + ], + "outputs": [ + { "name": "features" }, + { "name": "weights" } + ] + } + }, + { + "name": "SkipGram", + "schema": { + "inputs": [ + { "name": "inputs" } + ], + "outputs": [ + { "name": "ngrams" } + ] + } + }, + { + "name": "Concatenation", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "inputs", "option": "variadic" } + ], + "outputs": [ + { "name": "output" } + ], + "attributes": [ + { "name": "axis", "type": "int32" }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + } + }, + { + "name": "Pad", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "paddings" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Split", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "axis" }, + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Squeeze", + "schema": { + "category": "Transform", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "StridedSlice", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { "name": "end" }, + { "name": "strides" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "SVDF", + "schema": { + "category": "Layer", + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "feature", "type": "T" }, + { "name": "time", "type": "T" }, + { "name": "bias", "type": "T" } + ], + "outputs": [ + { "name": "state", "type": "T" }, + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + } + }, + { + "name": "Add", + "schema": { + 
"inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + } + }, + { + "name": "Sub", + "schema": { + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + } + }, + { + "name": "Mul", + "schema": { + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + } + }, + { + "name": "Div", + "schema": { + "inputs": [ + { "name": "A", "type": "T" }, + { "name": "B", "type": "T" } + ], + "outputs": [ + { "name": "C", "type": "T" } + ], + "attributes": [ + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } + ] + } + }, + { + "name": "Sum", + "schema": { + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ] + } + }, + { + "name": "ReduceMax", + "schema": { + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ] + } + }, + { + "name": "ReduceMin", + "schema": { + "inputs": [ + { "name": "input", "type": "T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ] + } + }, + { + "name": "Mean", + "schema": { + "inputs": [ + { "name": "input", "type": 
"T" }, + { "name": "axis", "type": "T" } + ], + "outputs": [ + { "name": "output", "type": "T" } + ], + "attributes": [ + { "name": "keep_dims", "type": "boolean" } + ] + } + }, + { + "name": "Logistic", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ResizeBilinear", + "schema": { + "attributes": [ + { "name": "align_corners", "default": false } + ], + "inputs": [ + { "name": "input" }, + { "name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Gather", + "schema": { + "attributes": [ + { "name": "axis", "default": 0 } + ], + "inputs": [ + { "name": "input" }, + { "name": "positions" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Cast", + "schema": { + "attributes": [ + { "name": "in_data_type", "type": "TensorType" }, + { "name": "out_data_type", "type": "TensorType" } + ], + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "ArgMax", + "schema": { + "attributes": [ + { "name": "output_type", "type": "TensorType" } + ] + } + }, + { + "name": "ArgMin", + "schema": { + "attributes": [ + { "name": "output_type", "type": "TensorType" } + ] + } + }, + { + "name": "TransposeConv", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "padding", "type": "Padding" }, + { "name": "stride_w", "type": "int32" }, + { "name": "stride_h", "type": "int32" } + ], + "inputs": [ + { "name": "output_shape" }, + { "name": "weights" }, + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Shape", + "schema": { + "attributes": [ + { "name": "out_type", "type": "TensorType" } + ] + } + }, + { + "name": "Unique", + "schema": { + "attributes": [ + { "name": "idx_out_type", "type": "TensorType", "default": "int32" } + ] + } + }, + { + "name": "Slice", + "schema": { + "category": "Tensor", + "inputs": [ + { "name": "input" }, + { "name": "begin" }, + { 
"name": "size" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Transpose", + "schema": { + "category": "Transform", + "inputs": [ + { "name": "input" }, + { "name": "perm" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Quantize", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Dequantize", + "schema": { + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Minimum", + "schema": { + "inputs": [ + { "name": "input1" }, + { "name": "input2" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "Maximum", + "schema": { + "inputs": [ + { "name": "input1" }, + { "name": "input2" } + ], + "outputs": [ + { "name": "output" } + ] + } + }, + { + "name": "HardSwish", + "schema": { + "category": "Activation", + "inputs": [ + { "name": "input" } + ], + "outputs": [ + { "name": "output" } + ] + } + } +] diff --git a/frontend/packages/core/public/netron/tflite-schema.js b/frontend/packages/core/public/netron/tflite-schema.js new file mode 100644 index 00000000..b0d462bc --- /dev/null +++ b/frontend/packages/core/public/netron/tflite-schema.js @@ -0,0 +1,15558 @@ +// automatically generated by the FlatBuffers compiler, do not modify + +/** + * @const + * @namespace + */ +var tflite_schema = tflite_schema || {}; + +/** + * @enum {number} + */ +tflite_schema.TensorType = { + FLOAT32: 0, + FLOAT16: 1, + INT32: 2, + UINT8: 3, + INT64: 4, + STRING: 5, + BOOL: 6, + INT16: 7, + COMPLEX64: 8, + INT8: 9, + FLOAT64: 10 +}; + +/** + * @enum {string} + */ +tflite_schema.TensorTypeName = { + '0': 'FLOAT32', + '1': 'FLOAT16', + '2': 'INT32', + '3': 'UINT8', + '4': 'INT64', + '5': 'STRING', + '6': 'BOOL', + '7': 'INT16', + '8': 'COMPLEX64', + '9': 'INT8', + '10': 'FLOAT64' +}; + +/** + * @enum {number} + */ +tflite_schema.QuantizationDetails = { + NONE: 0, + CustomQuantization: 1 +}; + +/** + * @enum 
{string} + */ +tflite_schema.QuantizationDetailsName = { + '0': 'NONE', + '1': 'CustomQuantization' +}; + +/** + * @enum {number} + */ +tflite_schema.DimensionType = { + DENSE: 0, + SPARSE_CSR: 1 +}; + +/** + * @enum {string} + */ +tflite_schema.DimensionTypeName = { + '0': 'DENSE', + '1': 'SPARSE_CSR' +}; + +/** + * @enum {number} + */ +tflite_schema.SparseIndexVector = { + NONE: 0, + Int32Vector: 1, + Uint16Vector: 2, + Uint8Vector: 3 +}; + +/** + * @enum {string} + */ +tflite_schema.SparseIndexVectorName = { + '0': 'NONE', + '1': 'Int32Vector', + '2': 'Uint16Vector', + '3': 'Uint8Vector' +}; + +/** + * @enum {number} + */ +tflite_schema.BuiltinOperator = { + ADD: 0, + AVERAGE_POOL_2D: 1, + CONCATENATION: 2, + CONV_2D: 3, + DEPTHWISE_CONV_2D: 4, + DEPTH_TO_SPACE: 5, + DEQUANTIZE: 6, + EMBEDDING_LOOKUP: 7, + FLOOR: 8, + FULLY_CONNECTED: 9, + HASHTABLE_LOOKUP: 10, + L2_NORMALIZATION: 11, + L2_POOL_2D: 12, + LOCAL_RESPONSE_NORMALIZATION: 13, + LOGISTIC: 14, + LSH_PROJECTION: 15, + LSTM: 16, + MAX_POOL_2D: 17, + MUL: 18, + RELU: 19, + RELU_N1_TO_1: 20, + RELU6: 21, + RESHAPE: 22, + RESIZE_BILINEAR: 23, + RNN: 24, + SOFTMAX: 25, + SPACE_TO_DEPTH: 26, + SVDF: 27, + TANH: 28, + CONCAT_EMBEDDINGS: 29, + SKIP_GRAM: 30, + CALL: 31, + CUSTOM: 32, + EMBEDDING_LOOKUP_SPARSE: 33, + PAD: 34, + UNIDIRECTIONAL_SEQUENCE_RNN: 35, + GATHER: 36, + BATCH_TO_SPACE_ND: 37, + SPACE_TO_BATCH_ND: 38, + TRANSPOSE: 39, + MEAN: 40, + SUB: 41, + DIV: 42, + SQUEEZE: 43, + UNIDIRECTIONAL_SEQUENCE_LSTM: 44, + STRIDED_SLICE: 45, + BIDIRECTIONAL_SEQUENCE_RNN: 46, + EXP: 47, + TOPK_V2: 48, + SPLIT: 49, + LOG_SOFTMAX: 50, + DELEGATE: 51, + BIDIRECTIONAL_SEQUENCE_LSTM: 52, + CAST: 53, + PRELU: 54, + MAXIMUM: 55, + ARG_MAX: 56, + MINIMUM: 57, + LESS: 58, + NEG: 59, + PADV2: 60, + GREATER: 61, + GREATER_EQUAL: 62, + LESS_EQUAL: 63, + SELECT: 64, + SLICE: 65, + SIN: 66, + TRANSPOSE_CONV: 67, + SPARSE_TO_DENSE: 68, + TILE: 69, + EXPAND_DIMS: 70, + EQUAL: 71, + NOT_EQUAL: 72, + LOG: 73, + SUM: 74, + SQRT: 
75, + RSQRT: 76, + SHAPE: 77, + POW: 78, + ARG_MIN: 79, + FAKE_QUANT: 80, + REDUCE_PROD: 81, + REDUCE_MAX: 82, + PACK: 83, + LOGICAL_OR: 84, + ONE_HOT: 85, + LOGICAL_AND: 86, + LOGICAL_NOT: 87, + UNPACK: 88, + REDUCE_MIN: 89, + FLOOR_DIV: 90, + REDUCE_ANY: 91, + SQUARE: 92, + ZEROS_LIKE: 93, + FILL: 94, + FLOOR_MOD: 95, + RANGE: 96, + RESIZE_NEAREST_NEIGHBOR: 97, + LEAKY_RELU: 98, + SQUARED_DIFFERENCE: 99, + MIRROR_PAD: 100, + ABS: 101, + SPLIT_V: 102, + UNIQUE: 103, + CEIL: 104, + REVERSE_V2: 105, + ADD_N: 106, + GATHER_ND: 107, + COS: 108, + WHERE: 109, + RANK: 110, + ELU: 111, + REVERSE_SEQUENCE: 112, + MATRIX_DIAG: 113, + QUANTIZE: 114, + MATRIX_SET_DIAG: 115, + ROUND: 116, + HARD_SWISH: 117, + IF: 118, + WHILE: 119, + NON_MAX_SUPPRESSION_V4: 120, + NON_MAX_SUPPRESSION_V5: 121, + SCATTER_ND: 122, + SELECT_V2: 123, + DENSIFY: 124, + SEGMENT_SUM: 125, + BATCH_MATMUL: 126 +}; + +/** + * @enum {string} + */ +tflite_schema.BuiltinOperatorName = { + '0': 'ADD', + '1': 'AVERAGE_POOL_2D', + '2': 'CONCATENATION', + '3': 'CONV_2D', + '4': 'DEPTHWISE_CONV_2D', + '5': 'DEPTH_TO_SPACE', + '6': 'DEQUANTIZE', + '7': 'EMBEDDING_LOOKUP', + '8': 'FLOOR', + '9': 'FULLY_CONNECTED', + '10': 'HASHTABLE_LOOKUP', + '11': 'L2_NORMALIZATION', + '12': 'L2_POOL_2D', + '13': 'LOCAL_RESPONSE_NORMALIZATION', + '14': 'LOGISTIC', + '15': 'LSH_PROJECTION', + '16': 'LSTM', + '17': 'MAX_POOL_2D', + '18': 'MUL', + '19': 'RELU', + '20': 'RELU_N1_TO_1', + '21': 'RELU6', + '22': 'RESHAPE', + '23': 'RESIZE_BILINEAR', + '24': 'RNN', + '25': 'SOFTMAX', + '26': 'SPACE_TO_DEPTH', + '27': 'SVDF', + '28': 'TANH', + '29': 'CONCAT_EMBEDDINGS', + '30': 'SKIP_GRAM', + '31': 'CALL', + '32': 'CUSTOM', + '33': 'EMBEDDING_LOOKUP_SPARSE', + '34': 'PAD', + '35': 'UNIDIRECTIONAL_SEQUENCE_RNN', + '36': 'GATHER', + '37': 'BATCH_TO_SPACE_ND', + '38': 'SPACE_TO_BATCH_ND', + '39': 'TRANSPOSE', + '40': 'MEAN', + '41': 'SUB', + '42': 'DIV', + '43': 'SQUEEZE', + '44': 'UNIDIRECTIONAL_SEQUENCE_LSTM', + '45': 'STRIDED_SLICE', + 
'46': 'BIDIRECTIONAL_SEQUENCE_RNN', + '47': 'EXP', + '48': 'TOPK_V2', + '49': 'SPLIT', + '50': 'LOG_SOFTMAX', + '51': 'DELEGATE', + '52': 'BIDIRECTIONAL_SEQUENCE_LSTM', + '53': 'CAST', + '54': 'PRELU', + '55': 'MAXIMUM', + '56': 'ARG_MAX', + '57': 'MINIMUM', + '58': 'LESS', + '59': 'NEG', + '60': 'PADV2', + '61': 'GREATER', + '62': 'GREATER_EQUAL', + '63': 'LESS_EQUAL', + '64': 'SELECT', + '65': 'SLICE', + '66': 'SIN', + '67': 'TRANSPOSE_CONV', + '68': 'SPARSE_TO_DENSE', + '69': 'TILE', + '70': 'EXPAND_DIMS', + '71': 'EQUAL', + '72': 'NOT_EQUAL', + '73': 'LOG', + '74': 'SUM', + '75': 'SQRT', + '76': 'RSQRT', + '77': 'SHAPE', + '78': 'POW', + '79': 'ARG_MIN', + '80': 'FAKE_QUANT', + '81': 'REDUCE_PROD', + '82': 'REDUCE_MAX', + '83': 'PACK', + '84': 'LOGICAL_OR', + '85': 'ONE_HOT', + '86': 'LOGICAL_AND', + '87': 'LOGICAL_NOT', + '88': 'UNPACK', + '89': 'REDUCE_MIN', + '90': 'FLOOR_DIV', + '91': 'REDUCE_ANY', + '92': 'SQUARE', + '93': 'ZEROS_LIKE', + '94': 'FILL', + '95': 'FLOOR_MOD', + '96': 'RANGE', + '97': 'RESIZE_NEAREST_NEIGHBOR', + '98': 'LEAKY_RELU', + '99': 'SQUARED_DIFFERENCE', + '100': 'MIRROR_PAD', + '101': 'ABS', + '102': 'SPLIT_V', + '103': 'UNIQUE', + '104': 'CEIL', + '105': 'REVERSE_V2', + '106': 'ADD_N', + '107': 'GATHER_ND', + '108': 'COS', + '109': 'WHERE', + '110': 'RANK', + '111': 'ELU', + '112': 'REVERSE_SEQUENCE', + '113': 'MATRIX_DIAG', + '114': 'QUANTIZE', + '115': 'MATRIX_SET_DIAG', + '116': 'ROUND', + '117': 'HARD_SWISH', + '118': 'IF', + '119': 'WHILE', + '120': 'NON_MAX_SUPPRESSION_V4', + '121': 'NON_MAX_SUPPRESSION_V5', + '122': 'SCATTER_ND', + '123': 'SELECT_V2', + '124': 'DENSIFY', + '125': 'SEGMENT_SUM', + '126': 'BATCH_MATMUL' +}; + +/** + * @enum {number} + */ +tflite_schema.BuiltinOptions = { + NONE: 0, + Conv2DOptions: 1, + DepthwiseConv2DOptions: 2, + ConcatEmbeddingsOptions: 3, + LSHProjectionOptions: 4, + Pool2DOptions: 5, + SVDFOptions: 6, + RNNOptions: 7, + FullyConnectedOptions: 8, + SoftmaxOptions: 9, + ConcatenationOptions: 
10, + AddOptions: 11, + L2NormOptions: 12, + LocalResponseNormalizationOptions: 13, + LSTMOptions: 14, + ResizeBilinearOptions: 15, + CallOptions: 16, + ReshapeOptions: 17, + SkipGramOptions: 18, + SpaceToDepthOptions: 19, + EmbeddingLookupSparseOptions: 20, + MulOptions: 21, + PadOptions: 22, + GatherOptions: 23, + BatchToSpaceNDOptions: 24, + SpaceToBatchNDOptions: 25, + TransposeOptions: 26, + ReducerOptions: 27, + SubOptions: 28, + DivOptions: 29, + SqueezeOptions: 30, + SequenceRNNOptions: 31, + StridedSliceOptions: 32, + ExpOptions: 33, + TopKV2Options: 34, + SplitOptions: 35, + LogSoftmaxOptions: 36, + CastOptions: 37, + DequantizeOptions: 38, + MaximumMinimumOptions: 39, + ArgMaxOptions: 40, + LessOptions: 41, + NegOptions: 42, + PadV2Options: 43, + GreaterOptions: 44, + GreaterEqualOptions: 45, + LessEqualOptions: 46, + SelectOptions: 47, + SliceOptions: 48, + TransposeConvOptions: 49, + SparseToDenseOptions: 50, + TileOptions: 51, + ExpandDimsOptions: 52, + EqualOptions: 53, + NotEqualOptions: 54, + ShapeOptions: 55, + PowOptions: 56, + ArgMinOptions: 57, + FakeQuantOptions: 58, + PackOptions: 59, + LogicalOrOptions: 60, + OneHotOptions: 61, + LogicalAndOptions: 62, + LogicalNotOptions: 63, + UnpackOptions: 64, + FloorDivOptions: 65, + SquareOptions: 66, + ZerosLikeOptions: 67, + FillOptions: 68, + BidirectionalSequenceLSTMOptions: 69, + BidirectionalSequenceRNNOptions: 70, + UnidirectionalSequenceLSTMOptions: 71, + FloorModOptions: 72, + RangeOptions: 73, + ResizeNearestNeighborOptions: 74, + LeakyReluOptions: 75, + SquaredDifferenceOptions: 76, + MirrorPadOptions: 77, + AbsOptions: 78, + SplitVOptions: 79, + UniqueOptions: 80, + ReverseV2Options: 81, + AddNOptions: 82, + GatherNdOptions: 83, + CosOptions: 84, + WhereOptions: 85, + RankOptions: 86, + ReverseSequenceOptions: 87, + MatrixDiagOptions: 88, + QuantizeOptions: 89, + MatrixSetDiagOptions: 90, + HardSwishOptions: 91, + IfOptions: 92, + WhileOptions: 93, + DepthToSpaceOptions: 94, + 
NonMaxSuppressionV4Options: 95, + NonMaxSuppressionV5Options: 96, + ScatterNdOptions: 97, + SelectV2Options: 98, + DensifyOptions: 99, + SegmentSumOptions: 100, + BatchMatMulOptions: 101 +}; + +/** + * @enum {string} + */ +tflite_schema.BuiltinOptionsName = { + '0': 'NONE', + '1': 'Conv2DOptions', + '2': 'DepthwiseConv2DOptions', + '3': 'ConcatEmbeddingsOptions', + '4': 'LSHProjectionOptions', + '5': 'Pool2DOptions', + '6': 'SVDFOptions', + '7': 'RNNOptions', + '8': 'FullyConnectedOptions', + '9': 'SoftmaxOptions', + '10': 'ConcatenationOptions', + '11': 'AddOptions', + '12': 'L2NormOptions', + '13': 'LocalResponseNormalizationOptions', + '14': 'LSTMOptions', + '15': 'ResizeBilinearOptions', + '16': 'CallOptions', + '17': 'ReshapeOptions', + '18': 'SkipGramOptions', + '19': 'SpaceToDepthOptions', + '20': 'EmbeddingLookupSparseOptions', + '21': 'MulOptions', + '22': 'PadOptions', + '23': 'GatherOptions', + '24': 'BatchToSpaceNDOptions', + '25': 'SpaceToBatchNDOptions', + '26': 'TransposeOptions', + '27': 'ReducerOptions', + '28': 'SubOptions', + '29': 'DivOptions', + '30': 'SqueezeOptions', + '31': 'SequenceRNNOptions', + '32': 'StridedSliceOptions', + '33': 'ExpOptions', + '34': 'TopKV2Options', + '35': 'SplitOptions', + '36': 'LogSoftmaxOptions', + '37': 'CastOptions', + '38': 'DequantizeOptions', + '39': 'MaximumMinimumOptions', + '40': 'ArgMaxOptions', + '41': 'LessOptions', + '42': 'NegOptions', + '43': 'PadV2Options', + '44': 'GreaterOptions', + '45': 'GreaterEqualOptions', + '46': 'LessEqualOptions', + '47': 'SelectOptions', + '48': 'SliceOptions', + '49': 'TransposeConvOptions', + '50': 'SparseToDenseOptions', + '51': 'TileOptions', + '52': 'ExpandDimsOptions', + '53': 'EqualOptions', + '54': 'NotEqualOptions', + '55': 'ShapeOptions', + '56': 'PowOptions', + '57': 'ArgMinOptions', + '58': 'FakeQuantOptions', + '59': 'PackOptions', + '60': 'LogicalOrOptions', + '61': 'OneHotOptions', + '62': 'LogicalAndOptions', + '63': 'LogicalNotOptions', + '64': 
'UnpackOptions', + '65': 'FloorDivOptions', + '66': 'SquareOptions', + '67': 'ZerosLikeOptions', + '68': 'FillOptions', + '69': 'BidirectionalSequenceLSTMOptions', + '70': 'BidirectionalSequenceRNNOptions', + '71': 'UnidirectionalSequenceLSTMOptions', + '72': 'FloorModOptions', + '73': 'RangeOptions', + '74': 'ResizeNearestNeighborOptions', + '75': 'LeakyReluOptions', + '76': 'SquaredDifferenceOptions', + '77': 'MirrorPadOptions', + '78': 'AbsOptions', + '79': 'SplitVOptions', + '80': 'UniqueOptions', + '81': 'ReverseV2Options', + '82': 'AddNOptions', + '83': 'GatherNdOptions', + '84': 'CosOptions', + '85': 'WhereOptions', + '86': 'RankOptions', + '87': 'ReverseSequenceOptions', + '88': 'MatrixDiagOptions', + '89': 'QuantizeOptions', + '90': 'MatrixSetDiagOptions', + '91': 'HardSwishOptions', + '92': 'IfOptions', + '93': 'WhileOptions', + '94': 'DepthToSpaceOptions', + '95': 'NonMaxSuppressionV4Options', + '96': 'NonMaxSuppressionV5Options', + '97': 'ScatterNdOptions', + '98': 'SelectV2Options', + '99': 'DensifyOptions', + '100': 'SegmentSumOptions', + '101': 'BatchMatMulOptions' +}; + +/** + * @enum {number} + */ +tflite_schema.Padding = { + SAME: 0, + VALID: 1 +}; + +/** + * @enum {string} + */ +tflite_schema.PaddingName = { + '0': 'SAME', + '1': 'VALID' +}; + +/** + * @enum {number} + */ +tflite_schema.ActivationFunctionType = { + NONE: 0, + RELU: 1, + RELU_N1_TO_1: 2, + RELU6: 3, + TANH: 4, + SIGN_BIT: 5 +}; + +/** + * @enum {string} + */ +tflite_schema.ActivationFunctionTypeName = { + '0': 'NONE', + '1': 'RELU', + '2': 'RELU_N1_TO_1', + '3': 'RELU6', + '4': 'TANH', + '5': 'SIGN_BIT' +}; + +/** + * @enum {number} + */ +tflite_schema.LSHProjectionType = { + UNKNOWN: 0, + SPARSE: 1, + DENSE: 2 +}; + +/** + * @enum {string} + */ +tflite_schema.LSHProjectionTypeName = { + '0': 'UNKNOWN', + '1': 'SPARSE', + '2': 'DENSE' +}; + +/** + * @enum {number} + */ +tflite_schema.FullyConnectedOptionsWeightsFormat = { + DEFAULT: 0, + SHUFFLED4x16INT8: 1 +}; + +/** + * @enum 
{string} + */ +tflite_schema.FullyConnectedOptionsWeightsFormatName = { + '0': 'DEFAULT', + '1': 'SHUFFLED4x16INT8' +}; + +/** + * @enum {number} + */ +tflite_schema.LSTMKernelType = { + FULL: 0, + BASIC: 1 +}; + +/** + * @enum {string} + */ +tflite_schema.LSTMKernelTypeName = { + '0': 'FULL', + '1': 'BASIC' +}; + +/** + * @enum {number} + */ +tflite_schema.CombinerType = { + SUM: 0, + MEAN: 1, + SQRTN: 2 +}; + +/** + * @enum {string} + */ +tflite_schema.CombinerTypeName = { + '0': 'SUM', + '1': 'MEAN', + '2': 'SQRTN' +}; + +/** + * @enum {number} + */ +tflite_schema.MirrorPadMode = { + REFLECT: 0, + SYMMETRIC: 1 +}; + +/** + * @enum {string} + */ +tflite_schema.MirrorPadModeName = { + '0': 'REFLECT', + '1': 'SYMMETRIC' +}; + +/** + * @enum {number} + */ +tflite_schema.CustomOptionsFormat = { + FLEXBUFFERS: 0 +}; + +/** + * @enum {string} + */ +tflite_schema.CustomOptionsFormatName = { + '0': 'FLEXBUFFERS' +}; + +/** + * @constructor + */ +tflite_schema.CustomQuantization = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.CustomQuantization} + */ +tflite_schema.CustomQuantization.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CustomQuantization=} obj + * @returns {tflite_schema.CustomQuantization} + */ +tflite_schema.CustomQuantization.getRootAsCustomQuantization = function(bb, obj) { + return (obj || new tflite_schema.CustomQuantization).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CustomQuantization=} obj + * @returns {tflite_schema.CustomQuantization} + */ +tflite_schema.CustomQuantization.getSizePrefixedRootAsCustomQuantization = function(bb, obj) { + bb.setPosition(bb.position() + 
flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.CustomQuantization).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.CustomQuantization.prototype.custom = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.CustomQuantization.prototype.customLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint8Array} + */ +tflite_schema.CustomQuantization.prototype.customArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.CustomQuantization.startCustomQuantization = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} customOffset + */ +tflite_schema.CustomQuantization.addCustom = function(builder, customOffset) { + builder.addFieldOffset(0, customOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.CustomQuantization.createCustomVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.CustomQuantization.startCustomVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns 
{flatbuffers.Offset} + */ +tflite_schema.CustomQuantization.endCustomQuantization = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} customOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.CustomQuantization.createCustomQuantization = function(builder, customOffset) { + tflite_schema.CustomQuantization.startCustomQuantization(builder); + tflite_schema.CustomQuantization.addCustom(builder, customOffset); + return tflite_schema.CustomQuantization.endCustomQuantization(builder); +} + +/** + * @constructor + */ +tflite_schema.QuantizationParameters = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.QuantizationParameters} + */ +tflite_schema.QuantizationParameters.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.QuantizationParameters=} obj + * @returns {tflite_schema.QuantizationParameters} + */ +tflite_schema.QuantizationParameters.getRootAsQuantizationParameters = function(bb, obj) { + return (obj || new tflite_schema.QuantizationParameters).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.QuantizationParameters=} obj + * @returns {tflite_schema.QuantizationParameters} + */ +tflite_schema.QuantizationParameters.getSizePrefixedRootAsQuantizationParameters = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.QuantizationParameters).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.min = 
function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.minLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +tflite_schema.QuantizationParameters.prototype.minArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.max = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.maxLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +tflite_schema.QuantizationParameters.prototype.maxArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.scale = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.scaleLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +tflite_schema.QuantizationParameters.prototype.scaleArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {flatbuffers.Long} + */ +tflite_schema.QuantizationParameters.prototype.zeroPoint = function(index) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt64(this.bb.__vector(this.bb_pos + offset) + index * 8) : this.bb.createLong(0, 0); +}; + +/** + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.zeroPointLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {tflite_schema.QuantizationDetails} + */ +tflite_schema.QuantizationParameters.prototype.detailsType = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {tflite_schema.QuantizationDetails} */ (this.bb.readUint8(this.bb_pos + offset)) : tflite_schema.QuantizationDetails.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +tflite_schema.QuantizationParameters.prototype.details = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.QuantizationParameters.prototype.quantizedDimension = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.QuantizationParameters.startQuantizationParameters = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} minOffset + */ +tflite_schema.QuantizationParameters.addMin = function(builder, minOffset) { + builder.addFieldOffset(0, minOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizationParameters.createMinVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.QuantizationParameters.startMinVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} maxOffset + */ +tflite_schema.QuantizationParameters.addMax = function(builder, maxOffset) { + builder.addFieldOffset(1, maxOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizationParameters.createMaxVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.QuantizationParameters.startMaxVector = function(builder, numElems) { + 
builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} scaleOffset + */ +tflite_schema.QuantizationParameters.addScale = function(builder, scaleOffset) { + builder.addFieldOffset(2, scaleOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizationParameters.createScaleVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.QuantizationParameters.startScaleVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} zeroPointOffset + */ +tflite_schema.QuantizationParameters.addZeroPoint = function(builder, zeroPointOffset) { + builder.addFieldOffset(3, zeroPointOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizationParameters.createZeroPointVector = function(builder, data) { + builder.startVector(8, data.length, 8); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt64(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.QuantizationParameters.startZeroPointVector = function(builder, numElems) { + builder.startVector(8, numElems, 8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.QuantizationDetails} detailsType + */ +tflite_schema.QuantizationParameters.addDetailsType = function(builder, detailsType) { + builder.addFieldInt8(4, detailsType, tflite_schema.QuantizationDetails.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param 
{flatbuffers.Offset} detailsOffset + */ +tflite_schema.QuantizationParameters.addDetails = function(builder, detailsOffset) { + builder.addFieldOffset(5, detailsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} quantizedDimension + */ +tflite_schema.QuantizationParameters.addQuantizedDimension = function(builder, quantizedDimension) { + builder.addFieldInt32(6, quantizedDimension, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizationParameters.endQuantizationParameters = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} minOffset + * @param {flatbuffers.Offset} maxOffset + * @param {flatbuffers.Offset} scaleOffset + * @param {flatbuffers.Offset} zeroPointOffset + * @param {tflite_schema.QuantizationDetails} detailsType + * @param {flatbuffers.Offset} detailsOffset + * @param {number} quantizedDimension + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizationParameters.createQuantizationParameters = function(builder, minOffset, maxOffset, scaleOffset, zeroPointOffset, detailsType, detailsOffset, quantizedDimension) { + tflite_schema.QuantizationParameters.startQuantizationParameters(builder); + tflite_schema.QuantizationParameters.addMin(builder, minOffset); + tflite_schema.QuantizationParameters.addMax(builder, maxOffset); + tflite_schema.QuantizationParameters.addScale(builder, scaleOffset); + tflite_schema.QuantizationParameters.addZeroPoint(builder, zeroPointOffset); + tflite_schema.QuantizationParameters.addDetailsType(builder, detailsType); + tflite_schema.QuantizationParameters.addDetails(builder, detailsOffset); + tflite_schema.QuantizationParameters.addQuantizedDimension(builder, quantizedDimension); + return tflite_schema.QuantizationParameters.endQuantizationParameters(builder); +} + +/** + * @constructor + */ +tflite_schema.Int32Vector = 
function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Int32Vector} + */ +tflite_schema.Int32Vector.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Int32Vector=} obj + * @returns {tflite_schema.Int32Vector} + */ +tflite_schema.Int32Vector.getRootAsInt32Vector = function(bb, obj) { + return (obj || new tflite_schema.Int32Vector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Int32Vector=} obj + * @returns {tflite_schema.Int32Vector} + */ +tflite_schema.Int32Vector.getSizePrefixedRootAsInt32Vector = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Int32Vector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Int32Vector.prototype.values = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Int32Vector.prototype.valuesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.Int32Vector.prototype.valuesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Int32Vector.startInt32Vector = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} valuesOffset + */ +tflite_schema.Int32Vector.addValues = function(builder, valuesOffset) { + builder.addFieldOffset(0, valuesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Int32Vector.createValuesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Int32Vector.startValuesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Int32Vector.endInt32Vector = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} valuesOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.Int32Vector.createInt32Vector = function(builder, valuesOffset) { + tflite_schema.Int32Vector.startInt32Vector(builder); + tflite_schema.Int32Vector.addValues(builder, valuesOffset); + return tflite_schema.Int32Vector.endInt32Vector(builder); +} + +/** + * @constructor + */ +tflite_schema.Uint16Vector = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Uint16Vector} + */ 
+tflite_schema.Uint16Vector.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Uint16Vector=} obj + * @returns {tflite_schema.Uint16Vector} + */ +tflite_schema.Uint16Vector.getRootAsUint16Vector = function(bb, obj) { + return (obj || new tflite_schema.Uint16Vector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Uint16Vector=} obj + * @returns {tflite_schema.Uint16Vector} + */ +tflite_schema.Uint16Vector.getSizePrefixedRootAsUint16Vector = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Uint16Vector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Uint16Vector.prototype.values = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint16(this.bb.__vector(this.bb_pos + offset) + index * 2) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Uint16Vector.prototype.valuesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint16Array} + */ +tflite_schema.Uint16Vector.prototype.valuesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Uint16Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Uint16Vector.startUint16Vector = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} valuesOffset + */ +tflite_schema.Uint16Vector.addValues = function(builder, valuesOffset) { + builder.addFieldOffset(0, valuesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Uint16Vector.createValuesVector = function(builder, data) { + builder.startVector(2, data.length, 2); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt16(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Uint16Vector.startValuesVector = function(builder, numElems) { + builder.startVector(2, numElems, 2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Uint16Vector.endUint16Vector = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} valuesOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.Uint16Vector.createUint16Vector = function(builder, valuesOffset) { + tflite_schema.Uint16Vector.startUint16Vector(builder); + tflite_schema.Uint16Vector.addValues(builder, valuesOffset); + return tflite_schema.Uint16Vector.endUint16Vector(builder); +} + +/** + * @constructor + */ +tflite_schema.Uint8Vector = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Uint8Vector} + */ 
+tflite_schema.Uint8Vector.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Uint8Vector=} obj + * @returns {tflite_schema.Uint8Vector} + */ +tflite_schema.Uint8Vector.getRootAsUint8Vector = function(bb, obj) { + return (obj || new tflite_schema.Uint8Vector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Uint8Vector=} obj + * @returns {tflite_schema.Uint8Vector} + */ +tflite_schema.Uint8Vector.getSizePrefixedRootAsUint8Vector = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Uint8Vector).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Uint8Vector.prototype.values = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Uint8Vector.prototype.valuesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint8Array} + */ +tflite_schema.Uint8Vector.prototype.valuesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Uint8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Uint8Vector.startUint8Vector = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} valuesOffset + */ +tflite_schema.Uint8Vector.addValues = function(builder, valuesOffset) { + builder.addFieldOffset(0, valuesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Uint8Vector.createValuesVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Uint8Vector.startValuesVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Uint8Vector.endUint8Vector = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} valuesOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.Uint8Vector.createUint8Vector = function(builder, valuesOffset) { + tflite_schema.Uint8Vector.startUint8Vector(builder); + tflite_schema.Uint8Vector.addValues(builder, valuesOffset); + return tflite_schema.Uint8Vector.endUint8Vector(builder); +} + +/** + * @constructor + */ +tflite_schema.DimensionMetadata = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.DimensionMetadata} + */ 
+tflite_schema.DimensionMetadata.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DimensionMetadata=} obj + * @returns {tflite_schema.DimensionMetadata} + */ +tflite_schema.DimensionMetadata.getRootAsDimensionMetadata = function(bb, obj) { + return (obj || new tflite_schema.DimensionMetadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DimensionMetadata=} obj + * @returns {tflite_schema.DimensionMetadata} + */ +tflite_schema.DimensionMetadata.getSizePrefixedRootAsDimensionMetadata = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.DimensionMetadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.DimensionType} + */ +tflite_schema.DimensionMetadata.prototype.format = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.DimensionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.DimensionType.DENSE; +}; + +/** + * @returns {number} + */ +tflite_schema.DimensionMetadata.prototype.denseSize = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {tflite_schema.SparseIndexVector} + */ +tflite_schema.DimensionMetadata.prototype.arraySegmentsType = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {tflite_schema.SparseIndexVector} */ (this.bb.readUint8(this.bb_pos + offset)) : tflite_schema.SparseIndexVector.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +tflite_schema.DimensionMetadata.prototype.arraySegments = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @returns {tflite_schema.SparseIndexVector} + */ +tflite_schema.DimensionMetadata.prototype.arrayIndicesType = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {tflite_schema.SparseIndexVector} */ (this.bb.readUint8(this.bb_pos + offset)) : tflite_schema.SparseIndexVector.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +tflite_schema.DimensionMetadata.prototype.arrayIndices = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.DimensionMetadata.startDimensionMetadata = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.DimensionType} format + */ +tflite_schema.DimensionMetadata.addFormat = function(builder, format) { + builder.addFieldInt8(0, format, tflite_schema.DimensionType.DENSE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} denseSize + */ +tflite_schema.DimensionMetadata.addDenseSize = function(builder, denseSize) { + builder.addFieldInt32(1, denseSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.SparseIndexVector} arraySegmentsType + */ +tflite_schema.DimensionMetadata.addArraySegmentsType = function(builder, arraySegmentsType) { + builder.addFieldInt8(2, arraySegmentsType, tflite_schema.SparseIndexVector.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} arraySegmentsOffset + */ +tflite_schema.DimensionMetadata.addArraySegments = function(builder, arraySegmentsOffset) { + builder.addFieldOffset(3, arraySegmentsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.SparseIndexVector} arrayIndicesType + */ 
+tflite_schema.DimensionMetadata.addArrayIndicesType = function(builder, arrayIndicesType) { + builder.addFieldInt8(4, arrayIndicesType, tflite_schema.SparseIndexVector.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} arrayIndicesOffset + */ +tflite_schema.DimensionMetadata.addArrayIndices = function(builder, arrayIndicesOffset) { + builder.addFieldOffset(5, arrayIndicesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.DimensionMetadata.endDimensionMetadata = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.DimensionType} format + * @param {number} denseSize + * @param {tflite_schema.SparseIndexVector} arraySegmentsType + * @param {flatbuffers.Offset} arraySegmentsOffset + * @param {tflite_schema.SparseIndexVector} arrayIndicesType + * @param {flatbuffers.Offset} arrayIndicesOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.DimensionMetadata.createDimensionMetadata = function(builder, format, denseSize, arraySegmentsType, arraySegmentsOffset, arrayIndicesType, arrayIndicesOffset) { + tflite_schema.DimensionMetadata.startDimensionMetadata(builder); + tflite_schema.DimensionMetadata.addFormat(builder, format); + tflite_schema.DimensionMetadata.addDenseSize(builder, denseSize); + tflite_schema.DimensionMetadata.addArraySegmentsType(builder, arraySegmentsType); + tflite_schema.DimensionMetadata.addArraySegments(builder, arraySegmentsOffset); + tflite_schema.DimensionMetadata.addArrayIndicesType(builder, arrayIndicesType); + tflite_schema.DimensionMetadata.addArrayIndices(builder, arrayIndicesOffset); + return tflite_schema.DimensionMetadata.endDimensionMetadata(builder); +} + +/** + * @constructor + */ +tflite_schema.SparsityParameters = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + 
this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SparsityParameters} + */ +tflite_schema.SparsityParameters.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SparsityParameters=} obj + * @returns {tflite_schema.SparsityParameters} + */ +tflite_schema.SparsityParameters.getRootAsSparsityParameters = function(bb, obj) { + return (obj || new tflite_schema.SparsityParameters).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SparsityParameters=} obj + * @returns {tflite_schema.SparsityParameters} + */ +tflite_schema.SparsityParameters.getSizePrefixedRootAsSparsityParameters = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SparsityParameters).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.SparsityParameters.prototype.traversalOrder = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.SparsityParameters.prototype.traversalOrderLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.SparsityParameters.prototype.traversalOrderArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.SparsityParameters.prototype.blockMap = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.SparsityParameters.prototype.blockMapLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.SparsityParameters.prototype.blockMapArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @param {tflite_schema.DimensionMetadata=} obj + * @returns {tflite_schema.DimensionMetadata} + */ +tflite_schema.SparsityParameters.prototype.dimMetadata = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new tflite_schema.DimensionMetadata).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.SparsityParameters.prototype.dimMetadataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SparsityParameters.startSparsityParameters = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} traversalOrderOffset + */ +tflite_schema.SparsityParameters.addTraversalOrder = function(builder, traversalOrderOffset) { + builder.addFieldOffset(0, traversalOrderOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SparsityParameters.createTraversalOrderVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SparsityParameters.startTraversalOrderVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} blockMapOffset + */ +tflite_schema.SparsityParameters.addBlockMap = function(builder, blockMapOffset) { + builder.addFieldOffset(1, blockMapOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SparsityParameters.createBlockMapVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SparsityParameters.startBlockMapVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimMetadataOffset + */ +tflite_schema.SparsityParameters.addDimMetadata 
= function(builder, dimMetadataOffset) { + builder.addFieldOffset(2, dimMetadataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SparsityParameters.createDimMetadataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SparsityParameters.startDimMetadataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SparsityParameters.endSparsityParameters = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} traversalOrderOffset + * @param {flatbuffers.Offset} blockMapOffset + * @param {flatbuffers.Offset} dimMetadataOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.SparsityParameters.createSparsityParameters = function(builder, traversalOrderOffset, blockMapOffset, dimMetadataOffset) { + tflite_schema.SparsityParameters.startSparsityParameters(builder); + tflite_schema.SparsityParameters.addTraversalOrder(builder, traversalOrderOffset); + tflite_schema.SparsityParameters.addBlockMap(builder, blockMapOffset); + tflite_schema.SparsityParameters.addDimMetadata(builder, dimMetadataOffset); + return tflite_schema.SparsityParameters.endSparsityParameters(builder); +} + +/** + * @constructor + */ +tflite_schema.Tensor = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Tensor} + */ +tflite_schema.Tensor.prototype.__init = function(i, bb) { + 
this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Tensor=} obj + * @returns {tflite_schema.Tensor} + */ +tflite_schema.Tensor.getRootAsTensor = function(bb, obj) { + return (obj || new tflite_schema.Tensor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Tensor=} obj + * @returns {tflite_schema.Tensor} + */ +tflite_schema.Tensor.getSizePrefixedRootAsTensor = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Tensor).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Tensor.prototype.shape = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Tensor.prototype.shapeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.Tensor.prototype.shapeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {tflite_schema.TensorType} + */ +tflite_schema.Tensor.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {tflite_schema.TensorType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.TensorType.FLOAT32; +}; + +/** + * @returns {number} + */ +tflite_schema.Tensor.prototype.buffer = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_schema.Tensor.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {tflite_schema.QuantizationParameters=} obj + * @returns {tflite_schema.QuantizationParameters|null} + */ +tflite_schema.Tensor.prototype.quantization = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new tflite_schema.QuantizationParameters).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @returns {boolean} + */ +tflite_schema.Tensor.prototype.isVariable = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {tflite_schema.SparsityParameters=} obj + * @returns {tflite_schema.SparsityParameters|null} + */ +tflite_schema.Tensor.prototype.sparsity = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new tflite_schema.SparsityParameters).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Tensor.prototype.shapeSignature = function(index) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Tensor.prototype.shapeSignatureLength = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.Tensor.prototype.shapeSignatureArray = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Tensor.startTensor = function(builder) { + builder.startObject(8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} shapeOffset + */ +tflite_schema.Tensor.addShape = function(builder, shapeOffset) { + builder.addFieldOffset(0, shapeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Tensor.createShapeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Tensor.startShapeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} type + */ +tflite_schema.Tensor.addType = function(builder, type) { + builder.addFieldInt8(1, type, tflite_schema.TensorType.FLOAT32); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} buffer + */ +tflite_schema.Tensor.addBuffer = function(builder, buffer) { + builder.addFieldInt32(2, buffer, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +tflite_schema.Tensor.addName = function(builder, nameOffset) { + builder.addFieldOffset(3, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} quantizationOffset + */ +tflite_schema.Tensor.addQuantization = function(builder, quantizationOffset) { + builder.addFieldOffset(4, quantizationOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} isVariable + */ 
+tflite_schema.Tensor.addIsVariable = function(builder, isVariable) { + builder.addFieldInt8(5, +isVariable, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} sparsityOffset + */ +tflite_schema.Tensor.addSparsity = function(builder, sparsityOffset) { + builder.addFieldOffset(6, sparsityOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} shapeSignatureOffset + */ +tflite_schema.Tensor.addShapeSignature = function(builder, shapeSignatureOffset) { + builder.addFieldOffset(7, shapeSignatureOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Tensor.createShapeSignatureVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Tensor.startShapeSignatureVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Tensor.endTensor = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} shapeOffset + * @param {tflite_schema.TensorType} type + * @param {number} buffer + * @param {flatbuffers.Offset} nameOffset + * @param {flatbuffers.Offset} quantizationOffset + * @param {boolean} isVariable + * @param {flatbuffers.Offset} sparsityOffset + * @param {flatbuffers.Offset} shapeSignatureOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.Tensor.createTensor = function(builder, shapeOffset, type, buffer, nameOffset, quantizationOffset, isVariable, sparsityOffset, shapeSignatureOffset) { + tflite_schema.Tensor.startTensor(builder); + 
tflite_schema.Tensor.addShape(builder, shapeOffset); + tflite_schema.Tensor.addType(builder, type); + tflite_schema.Tensor.addBuffer(builder, buffer); + tflite_schema.Tensor.addName(builder, nameOffset); + tflite_schema.Tensor.addQuantization(builder, quantizationOffset); + tflite_schema.Tensor.addIsVariable(builder, isVariable); + tflite_schema.Tensor.addSparsity(builder, sparsityOffset); + tflite_schema.Tensor.addShapeSignature(builder, shapeSignatureOffset); + return tflite_schema.Tensor.endTensor(builder); +} + +/** + * @constructor + */ +tflite_schema.Conv2DOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Conv2DOptions} + */ +tflite_schema.Conv2DOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Conv2DOptions=} obj + * @returns {tflite_schema.Conv2DOptions} + */ +tflite_schema.Conv2DOptions.getRootAsConv2DOptions = function(bb, obj) { + return (obj || new tflite_schema.Conv2DOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Conv2DOptions=} obj + * @returns {tflite_schema.Conv2DOptions} + */ +tflite_schema.Conv2DOptions.getSizePrefixedRootAsConv2DOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Conv2DOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.Padding} + */ +tflite_schema.Conv2DOptions.prototype.padding = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {tflite_schema.Padding} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.Padding.SAME; +}; + +/** + * @returns {number} + */ +tflite_schema.Conv2DOptions.prototype.strideW = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Conv2DOptions.prototype.strideH = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.Conv2DOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {number} + */ +tflite_schema.Conv2DOptions.prototype.dilationWFactor = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +tflite_schema.Conv2DOptions.prototype.dilationHFactor = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Conv2DOptions.startConv2DOptions = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + */ +tflite_schema.Conv2DOptions.addPadding = function(builder, padding) { + builder.addFieldInt8(0, padding, tflite_schema.Padding.SAME); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideW + */ +tflite_schema.Conv2DOptions.addStrideW = function(builder, strideW) { + builder.addFieldInt32(1, strideW, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideH + */ +tflite_schema.Conv2DOptions.addStrideH = function(builder, strideH) { + builder.addFieldInt32(2, strideH, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.Conv2DOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(3, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationWFactor + */ +tflite_schema.Conv2DOptions.addDilationWFactor = function(builder, dilationWFactor) { + builder.addFieldInt32(4, dilationWFactor, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationHFactor + */ +tflite_schema.Conv2DOptions.addDilationHFactor = function(builder, dilationHFactor) { + builder.addFieldInt32(5, dilationHFactor, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Conv2DOptions.endConv2DOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + * @param {number} strideW + * @param {number} strideH + * @param 
{tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {number} dilationWFactor + * @param {number} dilationHFactor + * @returns {flatbuffers.Offset} + */ +tflite_schema.Conv2DOptions.createConv2DOptions = function(builder, padding, strideW, strideH, fusedActivationFunction, dilationWFactor, dilationHFactor) { + tflite_schema.Conv2DOptions.startConv2DOptions(builder); + tflite_schema.Conv2DOptions.addPadding(builder, padding); + tflite_schema.Conv2DOptions.addStrideW(builder, strideW); + tflite_schema.Conv2DOptions.addStrideH(builder, strideH); + tflite_schema.Conv2DOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.Conv2DOptions.addDilationWFactor(builder, dilationWFactor); + tflite_schema.Conv2DOptions.addDilationHFactor(builder, dilationHFactor); + return tflite_schema.Conv2DOptions.endConv2DOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.Pool2DOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Pool2DOptions} + */ +tflite_schema.Pool2DOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Pool2DOptions=} obj + * @returns {tflite_schema.Pool2DOptions} + */ +tflite_schema.Pool2DOptions.getRootAsPool2DOptions = function(bb, obj) { + return (obj || new tflite_schema.Pool2DOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Pool2DOptions=} obj + * @returns {tflite_schema.Pool2DOptions} + */ +tflite_schema.Pool2DOptions.getSizePrefixedRootAsPool2DOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new 
tflite_schema.Pool2DOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.Padding} + */ +tflite_schema.Pool2DOptions.prototype.padding = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.Padding} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.Padding.SAME; +}; + +/** + * @returns {number} + */ +tflite_schema.Pool2DOptions.prototype.strideW = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Pool2DOptions.prototype.strideH = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Pool2DOptions.prototype.filterWidth = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Pool2DOptions.prototype.filterHeight = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.Pool2DOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
/** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Pool2DOptions.startPool2DOptions = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + */ +tflite_schema.Pool2DOptions.addPadding = function(builder, padding) { + builder.addFieldInt8(0, padding, tflite_schema.Padding.SAME); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideW + */ +tflite_schema.Pool2DOptions.addStrideW = function(builder, strideW) { + builder.addFieldInt32(1, strideW, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideH + */ +tflite_schema.Pool2DOptions.addStrideH = function(builder, strideH) { + builder.addFieldInt32(2, strideH, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} filterWidth + */ +tflite_schema.Pool2DOptions.addFilterWidth = function(builder, filterWidth) { + builder.addFieldInt32(3, filterWidth, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} filterHeight + */ +tflite_schema.Pool2DOptions.addFilterHeight = function(builder, filterHeight) { + builder.addFieldInt32(4, filterHeight, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.Pool2DOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(5, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Pool2DOptions.endPool2DOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + * @param 
{number} strideW + * @param {number} strideH + * @param {number} filterWidth + * @param {number} filterHeight + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @returns {flatbuffers.Offset} + */ +tflite_schema.Pool2DOptions.createPool2DOptions = function(builder, padding, strideW, strideH, filterWidth, filterHeight, fusedActivationFunction) { + tflite_schema.Pool2DOptions.startPool2DOptions(builder); + tflite_schema.Pool2DOptions.addPadding(builder, padding); + tflite_schema.Pool2DOptions.addStrideW(builder, strideW); + tflite_schema.Pool2DOptions.addStrideH(builder, strideH); + tflite_schema.Pool2DOptions.addFilterWidth(builder, filterWidth); + tflite_schema.Pool2DOptions.addFilterHeight(builder, filterHeight); + tflite_schema.Pool2DOptions.addFusedActivationFunction(builder, fusedActivationFunction); + return tflite_schema.Pool2DOptions.endPool2DOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.DepthwiseConv2DOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.DepthwiseConv2DOptions} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DepthwiseConv2DOptions=} obj + * @returns {tflite_schema.DepthwiseConv2DOptions} + */ +tflite_schema.DepthwiseConv2DOptions.getRootAsDepthwiseConv2DOptions = function(bb, obj) { + return (obj || new tflite_schema.DepthwiseConv2DOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DepthwiseConv2DOptions=} obj + * @returns {tflite_schema.DepthwiseConv2DOptions} + */ +tflite_schema.DepthwiseConv2DOptions.getSizePrefixedRootAsDepthwiseConv2DOptions = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.DepthwiseConv2DOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.Padding} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.padding = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.Padding} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.Padding.SAME; +}; + +/** + * @returns {number} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.strideW = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.strideH = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.depthMultiplier = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {number} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.dilationWFactor = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @returns {number} + */ +tflite_schema.DepthwiseConv2DOptions.prototype.dilationHFactor = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.DepthwiseConv2DOptions.startDepthwiseConv2DOptions = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + */ +tflite_schema.DepthwiseConv2DOptions.addPadding = function(builder, padding) { + builder.addFieldInt8(0, padding, tflite_schema.Padding.SAME); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideW + */ +tflite_schema.DepthwiseConv2DOptions.addStrideW = function(builder, strideW) { + builder.addFieldInt32(1, strideW, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideH + */ +tflite_schema.DepthwiseConv2DOptions.addStrideH = function(builder, strideH) { + builder.addFieldInt32(2, strideH, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} depthMultiplier + */ +tflite_schema.DepthwiseConv2DOptions.addDepthMultiplier = function(builder, depthMultiplier) { + builder.addFieldInt32(3, depthMultiplier, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.DepthwiseConv2DOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(4, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationWFactor + */ +tflite_schema.DepthwiseConv2DOptions.addDilationWFactor = function(builder, dilationWFactor) { + builder.addFieldInt32(5, dilationWFactor, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} dilationHFactor + */ +tflite_schema.DepthwiseConv2DOptions.addDilationHFactor = function(builder, dilationHFactor) { + builder.addFieldInt32(6, dilationHFactor, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns 
{flatbuffers.Offset} + */ +tflite_schema.DepthwiseConv2DOptions.endDepthwiseConv2DOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + * @param {number} strideW + * @param {number} strideH + * @param {number} depthMultiplier + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {number} dilationWFactor + * @param {number} dilationHFactor + * @returns {flatbuffers.Offset} + */ +tflite_schema.DepthwiseConv2DOptions.createDepthwiseConv2DOptions = function(builder, padding, strideW, strideH, depthMultiplier, fusedActivationFunction, dilationWFactor, dilationHFactor) { + tflite_schema.DepthwiseConv2DOptions.startDepthwiseConv2DOptions(builder); + tflite_schema.DepthwiseConv2DOptions.addPadding(builder, padding); + tflite_schema.DepthwiseConv2DOptions.addStrideW(builder, strideW); + tflite_schema.DepthwiseConv2DOptions.addStrideH(builder, strideH); + tflite_schema.DepthwiseConv2DOptions.addDepthMultiplier(builder, depthMultiplier); + tflite_schema.DepthwiseConv2DOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.DepthwiseConv2DOptions.addDilationWFactor(builder, dilationWFactor); + tflite_schema.DepthwiseConv2DOptions.addDilationHFactor(builder, dilationHFactor); + return tflite_schema.DepthwiseConv2DOptions.endDepthwiseConv2DOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ConcatEmbeddingsOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ConcatEmbeddingsOptions} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{tflite_schema.ConcatEmbeddingsOptions=} obj + * @returns {tflite_schema.ConcatEmbeddingsOptions} + */ +tflite_schema.ConcatEmbeddingsOptions.getRootAsConcatEmbeddingsOptions = function(bb, obj) { + return (obj || new tflite_schema.ConcatEmbeddingsOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ConcatEmbeddingsOptions=} obj + * @returns {tflite_schema.ConcatEmbeddingsOptions} + */ +tflite_schema.ConcatEmbeddingsOptions.getSizePrefixedRootAsConcatEmbeddingsOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ConcatEmbeddingsOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.numChannels = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.numColumnsPerChannel = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.numColumnsPerChannelLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.numColumnsPerChannelArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.embeddingDimPerChannel = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.embeddingDimPerChannelLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.ConcatEmbeddingsOptions.prototype.embeddingDimPerChannelArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ConcatEmbeddingsOptions.startConcatEmbeddingsOptions = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numChannels + */ +tflite_schema.ConcatEmbeddingsOptions.addNumChannels = function(builder, numChannels) { + builder.addFieldInt32(0, numChannels, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} numColumnsPerChannelOffset + */ +tflite_schema.ConcatEmbeddingsOptions.addNumColumnsPerChannel = function(builder, numColumnsPerChannelOffset) { + builder.addFieldOffset(1, numColumnsPerChannelOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.ConcatEmbeddingsOptions.createNumColumnsPerChannelVector = function(builder, data) { + builder.startVector(4, 
data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.ConcatEmbeddingsOptions.startNumColumnsPerChannelVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} embeddingDimPerChannelOffset + */ +tflite_schema.ConcatEmbeddingsOptions.addEmbeddingDimPerChannel = function(builder, embeddingDimPerChannelOffset) { + builder.addFieldOffset(2, embeddingDimPerChannelOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.ConcatEmbeddingsOptions.createEmbeddingDimPerChannelVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.ConcatEmbeddingsOptions.startEmbeddingDimPerChannelVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ConcatEmbeddingsOptions.endConcatEmbeddingsOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numChannels + * @param {flatbuffers.Offset} numColumnsPerChannelOffset + * @param {flatbuffers.Offset} embeddingDimPerChannelOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.ConcatEmbeddingsOptions.createConcatEmbeddingsOptions = function(builder, numChannels, numColumnsPerChannelOffset, embeddingDimPerChannelOffset) { + tflite_schema.ConcatEmbeddingsOptions.startConcatEmbeddingsOptions(builder); + 
tflite_schema.ConcatEmbeddingsOptions.addNumChannels(builder, numChannels); + tflite_schema.ConcatEmbeddingsOptions.addNumColumnsPerChannel(builder, numColumnsPerChannelOffset); + tflite_schema.ConcatEmbeddingsOptions.addEmbeddingDimPerChannel(builder, embeddingDimPerChannelOffset); + return tflite_schema.ConcatEmbeddingsOptions.endConcatEmbeddingsOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LSHProjectionOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LSHProjectionOptions} + */ +tflite_schema.LSHProjectionOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LSHProjectionOptions=} obj + * @returns {tflite_schema.LSHProjectionOptions} + */ +tflite_schema.LSHProjectionOptions.getRootAsLSHProjectionOptions = function(bb, obj) { + return (obj || new tflite_schema.LSHProjectionOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LSHProjectionOptions=} obj + * @returns {tflite_schema.LSHProjectionOptions} + */ +tflite_schema.LSHProjectionOptions.getSizePrefixedRootAsLSHProjectionOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LSHProjectionOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.LSHProjectionType} + */ +tflite_schema.LSHProjectionOptions.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {tflite_schema.LSHProjectionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.LSHProjectionType.UNKNOWN; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LSHProjectionOptions.startLSHProjectionOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.LSHProjectionType} type + */ +tflite_schema.LSHProjectionOptions.addType = function(builder, type) { + builder.addFieldInt8(0, type, tflite_schema.LSHProjectionType.UNKNOWN); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LSHProjectionOptions.endLSHProjectionOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.LSHProjectionType} type + * @returns {flatbuffers.Offset} + */ +tflite_schema.LSHProjectionOptions.createLSHProjectionOptions = function(builder, type) { + tflite_schema.LSHProjectionOptions.startLSHProjectionOptions(builder); + tflite_schema.LSHProjectionOptions.addType(builder, type); + return tflite_schema.LSHProjectionOptions.endLSHProjectionOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SVDFOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SVDFOptions} + */ +tflite_schema.SVDFOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SVDFOptions=} obj + * @returns {tflite_schema.SVDFOptions} + */ +tflite_schema.SVDFOptions.getRootAsSVDFOptions = function(bb, obj) { + return (obj || new tflite_schema.SVDFOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SVDFOptions=} obj + * @returns {tflite_schema.SVDFOptions} + */ +tflite_schema.SVDFOptions.getSizePrefixedRootAsSVDFOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SVDFOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.SVDFOptions.prototype.rank = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.SVDFOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {boolean} + */ +tflite_schema.SVDFOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SVDFOptions.startSVDFOptions = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} rank + */ +tflite_schema.SVDFOptions.addRank = function(builder, rank) { + builder.addFieldInt32(0, rank, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.SVDFOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(1, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.SVDFOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + builder.addFieldInt8(2, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SVDFOptions.endSVDFOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} rank + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.SVDFOptions.createSVDFOptions = function(builder, rank, fusedActivationFunction, asymmetricQuantizeInputs) { + tflite_schema.SVDFOptions.startSVDFOptions(builder); + tflite_schema.SVDFOptions.addRank(builder, rank); + tflite_schema.SVDFOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.SVDFOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return tflite_schema.SVDFOptions.endSVDFOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.RNNOptions = function() { + /** + * @type 
{flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.RNNOptions} + */ +tflite_schema.RNNOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.RNNOptions=} obj + * @returns {tflite_schema.RNNOptions} + */ +tflite_schema.RNNOptions.getRootAsRNNOptions = function(bb, obj) { + return (obj || new tflite_schema.RNNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.RNNOptions=} obj + * @returns {tflite_schema.RNNOptions} + */ +tflite_schema.RNNOptions.getSizePrefixedRootAsRNNOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.RNNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.RNNOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {boolean} + */ +tflite_schema.RNNOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.RNNOptions.startRNNOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.RNNOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.RNNOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + builder.addFieldInt8(1, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.RNNOptions.endRNNOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.RNNOptions.createRNNOptions = function(builder, fusedActivationFunction, asymmetricQuantizeInputs) { + tflite_schema.RNNOptions.startRNNOptions(builder); + tflite_schema.RNNOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.RNNOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return tflite_schema.RNNOptions.endRNNOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SequenceRNNOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SequenceRNNOptions} + */ +tflite_schema.SequenceRNNOptions.prototype.__init = function(i, 
bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SequenceRNNOptions=} obj + * @returns {tflite_schema.SequenceRNNOptions} + */ +tflite_schema.SequenceRNNOptions.getRootAsSequenceRNNOptions = function(bb, obj) { + return (obj || new tflite_schema.SequenceRNNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SequenceRNNOptions=} obj + * @returns {tflite_schema.SequenceRNNOptions} + */ +tflite_schema.SequenceRNNOptions.getSizePrefixedRootAsSequenceRNNOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SequenceRNNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +tflite_schema.SequenceRNNOptions.prototype.timeMajor = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.SequenceRNNOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {boolean} + */ +tflite_schema.SequenceRNNOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SequenceRNNOptions.startSequenceRNNOptions = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} timeMajor + */ +tflite_schema.SequenceRNNOptions.addTimeMajor = function(builder, timeMajor) { + builder.addFieldInt8(0, +timeMajor, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.SequenceRNNOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(1, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.SequenceRNNOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + builder.addFieldInt8(2, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SequenceRNNOptions.endSequenceRNNOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} timeMajor + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.SequenceRNNOptions.createSequenceRNNOptions = function(builder, timeMajor, fusedActivationFunction, asymmetricQuantizeInputs) { + tflite_schema.SequenceRNNOptions.startSequenceRNNOptions(builder); + tflite_schema.SequenceRNNOptions.addTimeMajor(builder, timeMajor); + tflite_schema.SequenceRNNOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.SequenceRNNOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return 
tflite_schema.SequenceRNNOptions.endSequenceRNNOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.BidirectionalSequenceRNNOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.BidirectionalSequenceRNNOptions} + */ +tflite_schema.BidirectionalSequenceRNNOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BidirectionalSequenceRNNOptions=} obj + * @returns {tflite_schema.BidirectionalSequenceRNNOptions} + */ +tflite_schema.BidirectionalSequenceRNNOptions.getRootAsBidirectionalSequenceRNNOptions = function(bb, obj) { + return (obj || new tflite_schema.BidirectionalSequenceRNNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BidirectionalSequenceRNNOptions=} obj + * @returns {tflite_schema.BidirectionalSequenceRNNOptions} + */ +tflite_schema.BidirectionalSequenceRNNOptions.getSizePrefixedRootAsBidirectionalSequenceRNNOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.BidirectionalSequenceRNNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +tflite_schema.BidirectionalSequenceRNNOptions.prototype.timeMajor = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.BidirectionalSequenceRNNOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {boolean} + */ +tflite_schema.BidirectionalSequenceRNNOptions.prototype.mergeOutputs = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +tflite_schema.BidirectionalSequenceRNNOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.BidirectionalSequenceRNNOptions.startBidirectionalSequenceRNNOptions = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} timeMajor + */ +tflite_schema.BidirectionalSequenceRNNOptions.addTimeMajor = function(builder, timeMajor) { + builder.addFieldInt8(0, +timeMajor, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.BidirectionalSequenceRNNOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(1, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} mergeOutputs + */ +tflite_schema.BidirectionalSequenceRNNOptions.addMergeOutputs = function(builder, mergeOutputs) { + builder.addFieldInt8(2, +mergeOutputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.BidirectionalSequenceRNNOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + builder.addFieldInt8(3, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns 
{flatbuffers.Offset} + */ +tflite_schema.BidirectionalSequenceRNNOptions.endBidirectionalSequenceRNNOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} timeMajor + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {boolean} mergeOutputs + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.BidirectionalSequenceRNNOptions.createBidirectionalSequenceRNNOptions = function(builder, timeMajor, fusedActivationFunction, mergeOutputs, asymmetricQuantizeInputs) { + tflite_schema.BidirectionalSequenceRNNOptions.startBidirectionalSequenceRNNOptions(builder); + tflite_schema.BidirectionalSequenceRNNOptions.addTimeMajor(builder, timeMajor); + tflite_schema.BidirectionalSequenceRNNOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.BidirectionalSequenceRNNOptions.addMergeOutputs(builder, mergeOutputs); + tflite_schema.BidirectionalSequenceRNNOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return tflite_schema.BidirectionalSequenceRNNOptions.endBidirectionalSequenceRNNOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.FullyConnectedOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.FullyConnectedOptions} + */ +tflite_schema.FullyConnectedOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FullyConnectedOptions=} obj + * @returns {tflite_schema.FullyConnectedOptions} + */ +tflite_schema.FullyConnectedOptions.getRootAsFullyConnectedOptions = function(bb, obj) { + return (obj || new 
tflite_schema.FullyConnectedOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FullyConnectedOptions=} obj + * @returns {tflite_schema.FullyConnectedOptions} + */ +tflite_schema.FullyConnectedOptions.getSizePrefixedRootAsFullyConnectedOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.FullyConnectedOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.FullyConnectedOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {tflite_schema.FullyConnectedOptionsWeightsFormat} + */ +tflite_schema.FullyConnectedOptions.prototype.weightsFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {tflite_schema.FullyConnectedOptionsWeightsFormat} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.FullyConnectedOptionsWeightsFormat.DEFAULT; +}; + +/** + * @returns {boolean} + */ +tflite_schema.FullyConnectedOptions.prototype.keepNumDims = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +tflite_schema.FullyConnectedOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.FullyConnectedOptions.startFullyConnectedOptions = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.FullyConnectedOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.FullyConnectedOptionsWeightsFormat} weightsFormat + */ +tflite_schema.FullyConnectedOptions.addWeightsFormat = function(builder, weightsFormat) { + builder.addFieldInt8(1, weightsFormat, tflite_schema.FullyConnectedOptionsWeightsFormat.DEFAULT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepNumDims + */ +tflite_schema.FullyConnectedOptions.addKeepNumDims = function(builder, keepNumDims) { + builder.addFieldInt8(2, +keepNumDims, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.FullyConnectedOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + builder.addFieldInt8(3, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FullyConnectedOptions.endFullyConnectedOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {tflite_schema.FullyConnectedOptionsWeightsFormat} weightsFormat + * @param {boolean} keepNumDims + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.FullyConnectedOptions.createFullyConnectedOptions = 
function(builder, fusedActivationFunction, weightsFormat, keepNumDims, asymmetricQuantizeInputs) { + tflite_schema.FullyConnectedOptions.startFullyConnectedOptions(builder); + tflite_schema.FullyConnectedOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.FullyConnectedOptions.addWeightsFormat(builder, weightsFormat); + tflite_schema.FullyConnectedOptions.addKeepNumDims(builder, keepNumDims); + tflite_schema.FullyConnectedOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return tflite_schema.FullyConnectedOptions.endFullyConnectedOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SoftmaxOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SoftmaxOptions} + */ +tflite_schema.SoftmaxOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SoftmaxOptions=} obj + * @returns {tflite_schema.SoftmaxOptions} + */ +tflite_schema.SoftmaxOptions.getRootAsSoftmaxOptions = function(bb, obj) { + return (obj || new tflite_schema.SoftmaxOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SoftmaxOptions=} obj + * @returns {tflite_schema.SoftmaxOptions} + */ +tflite_schema.SoftmaxOptions.getSizePrefixedRootAsSoftmaxOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SoftmaxOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.SoftmaxOptions.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SoftmaxOptions.startSoftmaxOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +tflite_schema.SoftmaxOptions.addBeta = function(builder, beta) { + builder.addFieldFloat32(0, beta, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SoftmaxOptions.endSoftmaxOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + * @returns {flatbuffers.Offset} + */ +tflite_schema.SoftmaxOptions.createSoftmaxOptions = function(builder, beta) { + tflite_schema.SoftmaxOptions.startSoftmaxOptions(builder); + tflite_schema.SoftmaxOptions.addBeta(builder, beta); + return tflite_schema.SoftmaxOptions.endSoftmaxOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ConcatenationOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ConcatenationOptions} + */ +tflite_schema.ConcatenationOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ConcatenationOptions=} obj + * @returns {tflite_schema.ConcatenationOptions} + */ +tflite_schema.ConcatenationOptions.getRootAsConcatenationOptions = function(bb, obj) { + return (obj || new tflite_schema.ConcatenationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ConcatenationOptions=} obj + * @returns {tflite_schema.ConcatenationOptions} + */ 
+tflite_schema.ConcatenationOptions.getSizePrefixedRootAsConcatenationOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ConcatenationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.ConcatenationOptions.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.ConcatenationOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ConcatenationOptions.startConcatenationOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +tflite_schema.ConcatenationOptions.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.ConcatenationOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(1, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ConcatenationOptions.endConcatenationOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @returns {flatbuffers.Offset} + */ 
+tflite_schema.ConcatenationOptions.createConcatenationOptions = function(builder, axis, fusedActivationFunction) { + tflite_schema.ConcatenationOptions.startConcatenationOptions(builder); + tflite_schema.ConcatenationOptions.addAxis(builder, axis); + tflite_schema.ConcatenationOptions.addFusedActivationFunction(builder, fusedActivationFunction); + return tflite_schema.ConcatenationOptions.endConcatenationOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.AddOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.AddOptions} + */ +tflite_schema.AddOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.AddOptions=} obj + * @returns {tflite_schema.AddOptions} + */ +tflite_schema.AddOptions.getRootAsAddOptions = function(bb, obj) { + return (obj || new tflite_schema.AddOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.AddOptions=} obj + * @returns {tflite_schema.AddOptions} + */ +tflite_schema.AddOptions.getSizePrefixedRootAsAddOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.AddOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.AddOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.AddOptions.startAddOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.AddOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.AddOptions.endAddOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @returns {flatbuffers.Offset} + */ +tflite_schema.AddOptions.createAddOptions = function(builder, fusedActivationFunction) { + tflite_schema.AddOptions.startAddOptions(builder); + tflite_schema.AddOptions.addFusedActivationFunction(builder, fusedActivationFunction); + return tflite_schema.AddOptions.endAddOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.MulOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.MulOptions} + */ +tflite_schema.MulOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MulOptions=} obj + * @returns {tflite_schema.MulOptions} + */ +tflite_schema.MulOptions.getRootAsMulOptions = function(bb, obj) { + return (obj || new tflite_schema.MulOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); 
+}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MulOptions=} obj + * @returns {tflite_schema.MulOptions} + */ +tflite_schema.MulOptions.getSizePrefixedRootAsMulOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.MulOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.MulOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.MulOptions.startMulOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.MulOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MulOptions.endMulOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @returns {flatbuffers.Offset} + */ +tflite_schema.MulOptions.createMulOptions = function(builder, fusedActivationFunction) { + tflite_schema.MulOptions.startMulOptions(builder); + tflite_schema.MulOptions.addFusedActivationFunction(builder, fusedActivationFunction); + return tflite_schema.MulOptions.endMulOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.L2NormOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + 
this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.L2NormOptions} + */ +tflite_schema.L2NormOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.L2NormOptions=} obj + * @returns {tflite_schema.L2NormOptions} + */ +tflite_schema.L2NormOptions.getRootAsL2NormOptions = function(bb, obj) { + return (obj || new tflite_schema.L2NormOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.L2NormOptions=} obj + * @returns {tflite_schema.L2NormOptions} + */ +tflite_schema.L2NormOptions.getSizePrefixedRootAsL2NormOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.L2NormOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.L2NormOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.L2NormOptions.startL2NormOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.L2NormOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.L2NormOptions.endL2NormOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @returns {flatbuffers.Offset} + */ +tflite_schema.L2NormOptions.createL2NormOptions = function(builder, fusedActivationFunction) { + tflite_schema.L2NormOptions.startL2NormOptions(builder); + tflite_schema.L2NormOptions.addFusedActivationFunction(builder, fusedActivationFunction); + return tflite_schema.L2NormOptions.endL2NormOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LocalResponseNormalizationOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LocalResponseNormalizationOptions} + */ +tflite_schema.LocalResponseNormalizationOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LocalResponseNormalizationOptions=} obj + * @returns {tflite_schema.LocalResponseNormalizationOptions} + */ 
+tflite_schema.LocalResponseNormalizationOptions.getRootAsLocalResponseNormalizationOptions = function(bb, obj) { + return (obj || new tflite_schema.LocalResponseNormalizationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LocalResponseNormalizationOptions=} obj + * @returns {tflite_schema.LocalResponseNormalizationOptions} + */ +tflite_schema.LocalResponseNormalizationOptions.getSizePrefixedRootAsLocalResponseNormalizationOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LocalResponseNormalizationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.LocalResponseNormalizationOptions.prototype.radius = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.LocalResponseNormalizationOptions.prototype.bias = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +tflite_schema.LocalResponseNormalizationOptions.prototype.alpha = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +tflite_schema.LocalResponseNormalizationOptions.prototype.beta = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LocalResponseNormalizationOptions.startLocalResponseNormalizationOptions = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} radius + */ +tflite_schema.LocalResponseNormalizationOptions.addRadius = function(builder, radius) { + builder.addFieldInt32(0, radius, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} bias + */ +tflite_schema.LocalResponseNormalizationOptions.addBias = function(builder, bias) { + builder.addFieldFloat32(1, bias, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + */ +tflite_schema.LocalResponseNormalizationOptions.addAlpha = function(builder, alpha) { + builder.addFieldFloat32(2, alpha, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beta + */ +tflite_schema.LocalResponseNormalizationOptions.addBeta = function(builder, beta) { + builder.addFieldFloat32(3, beta, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LocalResponseNormalizationOptions.endLocalResponseNormalizationOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} radius + * @param {number} bias + * @param {number} alpha + * @param {number} beta + * @returns {flatbuffers.Offset} + */ +tflite_schema.LocalResponseNormalizationOptions.createLocalResponseNormalizationOptions = function(builder, radius, bias, alpha, beta) { + tflite_schema.LocalResponseNormalizationOptions.startLocalResponseNormalizationOptions(builder); + tflite_schema.LocalResponseNormalizationOptions.addRadius(builder, radius); + tflite_schema.LocalResponseNormalizationOptions.addBias(builder, bias); + tflite_schema.LocalResponseNormalizationOptions.addAlpha(builder, alpha); + 
tflite_schema.LocalResponseNormalizationOptions.addBeta(builder, beta); + return tflite_schema.LocalResponseNormalizationOptions.endLocalResponseNormalizationOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LSTMOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LSTMOptions} + */ +tflite_schema.LSTMOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LSTMOptions=} obj + * @returns {tflite_schema.LSTMOptions} + */ +tflite_schema.LSTMOptions.getRootAsLSTMOptions = function(bb, obj) { + return (obj || new tflite_schema.LSTMOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LSTMOptions=} obj + * @returns {tflite_schema.LSTMOptions} + */ +tflite_schema.LSTMOptions.getSizePrefixedRootAsLSTMOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LSTMOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.LSTMOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {number} + */ +tflite_schema.LSTMOptions.prototype.cellClip = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +tflite_schema.LSTMOptions.prototype.projClip = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {tflite_schema.LSTMKernelType} + */ +tflite_schema.LSTMOptions.prototype.kernelType = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? /** @type {tflite_schema.LSTMKernelType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.LSTMKernelType.FULL; +}; + +/** + * @returns {boolean} + */ +tflite_schema.LSTMOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LSTMOptions.startLSTMOptions = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.LSTMOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} cellClip + */ +tflite_schema.LSTMOptions.addCellClip = function(builder, cellClip) { + builder.addFieldFloat32(1, cellClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} projClip + */ +tflite_schema.LSTMOptions.addProjClip = function(builder, projClip) { + builder.addFieldFloat32(2, projClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.LSTMKernelType} kernelType + */ +tflite_schema.LSTMOptions.addKernelType = function(builder, kernelType) { + builder.addFieldInt8(3, kernelType, tflite_schema.LSTMKernelType.FULL); +}; + +/** + * @param {flatbuffers.Builder} 
builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.LSTMOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + builder.addFieldInt8(4, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LSTMOptions.endLSTMOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {number} cellClip + * @param {number} projClip + * @param {tflite_schema.LSTMKernelType} kernelType + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.LSTMOptions.createLSTMOptions = function(builder, fusedActivationFunction, cellClip, projClip, kernelType, asymmetricQuantizeInputs) { + tflite_schema.LSTMOptions.startLSTMOptions(builder); + tflite_schema.LSTMOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.LSTMOptions.addCellClip(builder, cellClip); + tflite_schema.LSTMOptions.addProjClip(builder, projClip); + tflite_schema.LSTMOptions.addKernelType(builder, kernelType); + tflite_schema.LSTMOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return tflite_schema.LSTMOptions.endLSTMOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.UnidirectionalSequenceLSTMOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.UnidirectionalSequenceLSTMOptions} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.UnidirectionalSequenceLSTMOptions=} obj + * 
@returns {tflite_schema.UnidirectionalSequenceLSTMOptions} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.getRootAsUnidirectionalSequenceLSTMOptions = function(bb, obj) { + return (obj || new tflite_schema.UnidirectionalSequenceLSTMOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.UnidirectionalSequenceLSTMOptions=} obj + * @returns {tflite_schema.UnidirectionalSequenceLSTMOptions} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.getSizePrefixedRootAsUnidirectionalSequenceLSTMOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.UnidirectionalSequenceLSTMOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {number} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.prototype.cellClip = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.prototype.projClip = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {boolean} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.prototype.timeMajor = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.startUnidirectionalSequenceLSTMOptions = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} cellClip + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.addCellClip = function(builder, cellClip) { + builder.addFieldFloat32(1, cellClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} projClip + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.addProjClip = function(builder, projClip) { + builder.addFieldFloat32(2, projClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} timeMajor + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.addTimeMajor = function(builder, timeMajor) { + builder.addFieldInt8(3, +timeMajor, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + builder.addFieldInt8(4, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.endUnidirectionalSequenceLSTMOptions = 
function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {number} cellClip + * @param {number} projClip + * @param {boolean} timeMajor + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.UnidirectionalSequenceLSTMOptions.createUnidirectionalSequenceLSTMOptions = function(builder, fusedActivationFunction, cellClip, projClip, timeMajor, asymmetricQuantizeInputs) { + tflite_schema.UnidirectionalSequenceLSTMOptions.startUnidirectionalSequenceLSTMOptions(builder); + tflite_schema.UnidirectionalSequenceLSTMOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.UnidirectionalSequenceLSTMOptions.addCellClip(builder, cellClip); + tflite_schema.UnidirectionalSequenceLSTMOptions.addProjClip(builder, projClip); + tflite_schema.UnidirectionalSequenceLSTMOptions.addTimeMajor(builder, timeMajor); + tflite_schema.UnidirectionalSequenceLSTMOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return tflite_schema.UnidirectionalSequenceLSTMOptions.endUnidirectionalSequenceLSTMOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.BidirectionalSequenceLSTMOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.BidirectionalSequenceLSTMOptions} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BidirectionalSequenceLSTMOptions=} obj + * @returns {tflite_schema.BidirectionalSequenceLSTMOptions} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.getRootAsBidirectionalSequenceLSTMOptions 
= function(bb, obj) { + return (obj || new tflite_schema.BidirectionalSequenceLSTMOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BidirectionalSequenceLSTMOptions=} obj + * @returns {tflite_schema.BidirectionalSequenceLSTMOptions} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.getSizePrefixedRootAsBidirectionalSequenceLSTMOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.BidirectionalSequenceLSTMOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @returns {number} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.prototype.cellClip = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.prototype.projClip = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {boolean} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.prototype.mergeOutputs = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.prototype.timeMajor = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : true; +}; + +/** + * @returns {boolean} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.prototype.asymmetricQuantizeInputs = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.BidirectionalSequenceLSTMOptions.startBidirectionalSequenceLSTMOptions = function(builder) { + builder.startObject(6); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.BidirectionalSequenceLSTMOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} cellClip + */ +tflite_schema.BidirectionalSequenceLSTMOptions.addCellClip = function(builder, cellClip) { + builder.addFieldFloat32(1, cellClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} projClip + */ +tflite_schema.BidirectionalSequenceLSTMOptions.addProjClip = function(builder, projClip) { + builder.addFieldFloat32(2, projClip, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} mergeOutputs + */ +tflite_schema.BidirectionalSequenceLSTMOptions.addMergeOutputs = function(builder, mergeOutputs) { + builder.addFieldInt8(3, +mergeOutputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} timeMajor + */ +tflite_schema.BidirectionalSequenceLSTMOptions.addTimeMajor = function(builder, timeMajor) { + builder.addFieldInt8(4, +timeMajor, +true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} asymmetricQuantizeInputs + */ +tflite_schema.BidirectionalSequenceLSTMOptions.addAsymmetricQuantizeInputs = function(builder, asymmetricQuantizeInputs) { + 
builder.addFieldInt8(5, +asymmetricQuantizeInputs, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.endBidirectionalSequenceLSTMOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @param {number} cellClip + * @param {number} projClip + * @param {boolean} mergeOutputs + * @param {boolean} timeMajor + * @param {boolean} asymmetricQuantizeInputs + * @returns {flatbuffers.Offset} + */ +tflite_schema.BidirectionalSequenceLSTMOptions.createBidirectionalSequenceLSTMOptions = function(builder, fusedActivationFunction, cellClip, projClip, mergeOutputs, timeMajor, asymmetricQuantizeInputs) { + tflite_schema.BidirectionalSequenceLSTMOptions.startBidirectionalSequenceLSTMOptions(builder); + tflite_schema.BidirectionalSequenceLSTMOptions.addFusedActivationFunction(builder, fusedActivationFunction); + tflite_schema.BidirectionalSequenceLSTMOptions.addCellClip(builder, cellClip); + tflite_schema.BidirectionalSequenceLSTMOptions.addProjClip(builder, projClip); + tflite_schema.BidirectionalSequenceLSTMOptions.addMergeOutputs(builder, mergeOutputs); + tflite_schema.BidirectionalSequenceLSTMOptions.addTimeMajor(builder, timeMajor); + tflite_schema.BidirectionalSequenceLSTMOptions.addAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs); + return tflite_schema.BidirectionalSequenceLSTMOptions.endBidirectionalSequenceLSTMOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ResizeBilinearOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ResizeBilinearOptions} + */ +tflite_schema.ResizeBilinearOptions.prototype.__init = 
function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ResizeBilinearOptions=} obj + * @returns {tflite_schema.ResizeBilinearOptions} + */ +tflite_schema.ResizeBilinearOptions.getRootAsResizeBilinearOptions = function(bb, obj) { + return (obj || new tflite_schema.ResizeBilinearOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ResizeBilinearOptions=} obj + * @returns {tflite_schema.ResizeBilinearOptions} + */ +tflite_schema.ResizeBilinearOptions.getSizePrefixedRootAsResizeBilinearOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ResizeBilinearOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +tflite_schema.ResizeBilinearOptions.prototype.alignCorners = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +tflite_schema.ResizeBilinearOptions.prototype.halfPixelCenters = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ResizeBilinearOptions.startResizeBilinearOptions = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} alignCorners + */ +tflite_schema.ResizeBilinearOptions.addAlignCorners = function(builder, alignCorners) { + builder.addFieldInt8(2, +alignCorners, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} halfPixelCenters + */ +tflite_schema.ResizeBilinearOptions.addHalfPixelCenters = function(builder, halfPixelCenters) { + builder.addFieldInt8(3, +halfPixelCenters, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ResizeBilinearOptions.endResizeBilinearOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} alignCorners + * @param {boolean} halfPixelCenters + * @returns {flatbuffers.Offset} + */ +tflite_schema.ResizeBilinearOptions.createResizeBilinearOptions = function(builder, alignCorners, halfPixelCenters) { + tflite_schema.ResizeBilinearOptions.startResizeBilinearOptions(builder); + tflite_schema.ResizeBilinearOptions.addAlignCorners(builder, alignCorners); + tflite_schema.ResizeBilinearOptions.addHalfPixelCenters(builder, halfPixelCenters); + return tflite_schema.ResizeBilinearOptions.endResizeBilinearOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ResizeNearestNeighborOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ResizeNearestNeighborOptions} + */ +tflite_schema.ResizeNearestNeighborOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; 
+ +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ResizeNearestNeighborOptions=} obj + * @returns {tflite_schema.ResizeNearestNeighborOptions} + */ +tflite_schema.ResizeNearestNeighborOptions.getRootAsResizeNearestNeighborOptions = function(bb, obj) { + return (obj || new tflite_schema.ResizeNearestNeighborOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ResizeNearestNeighborOptions=} obj + * @returns {tflite_schema.ResizeNearestNeighborOptions} + */ +tflite_schema.ResizeNearestNeighborOptions.getSizePrefixedRootAsResizeNearestNeighborOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ResizeNearestNeighborOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +tflite_schema.ResizeNearestNeighborOptions.prototype.alignCorners = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +tflite_schema.ResizeNearestNeighborOptions.prototype.halfPixelCenters = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ResizeNearestNeighborOptions.startResizeNearestNeighborOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} alignCorners + */ +tflite_schema.ResizeNearestNeighborOptions.addAlignCorners = function(builder, alignCorners) { + builder.addFieldInt8(0, +alignCorners, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} halfPixelCenters + */ +tflite_schema.ResizeNearestNeighborOptions.addHalfPixelCenters = function(builder, halfPixelCenters) { + builder.addFieldInt8(1, +halfPixelCenters, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ResizeNearestNeighborOptions.endResizeNearestNeighborOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} alignCorners + * @param {boolean} halfPixelCenters + * @returns {flatbuffers.Offset} + */ +tflite_schema.ResizeNearestNeighborOptions.createResizeNearestNeighborOptions = function(builder, alignCorners, halfPixelCenters) { + tflite_schema.ResizeNearestNeighborOptions.startResizeNearestNeighborOptions(builder); + tflite_schema.ResizeNearestNeighborOptions.addAlignCorners(builder, alignCorners); + tflite_schema.ResizeNearestNeighborOptions.addHalfPixelCenters(builder, halfPixelCenters); + return tflite_schema.ResizeNearestNeighborOptions.endResizeNearestNeighborOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.CallOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.CallOptions} + */ +tflite_schema.CallOptions.prototype.__init = function(i, bb) { + 
this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CallOptions=} obj + * @returns {tflite_schema.CallOptions} + */ +tflite_schema.CallOptions.getRootAsCallOptions = function(bb, obj) { + return (obj || new tflite_schema.CallOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CallOptions=} obj + * @returns {tflite_schema.CallOptions} + */ +tflite_schema.CallOptions.getSizePrefixedRootAsCallOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.CallOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.CallOptions.prototype.subgraph = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.CallOptions.startCallOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} subgraph + */ +tflite_schema.CallOptions.addSubgraph = function(builder, subgraph) { + builder.addFieldInt32(0, subgraph, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.CallOptions.endCallOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} subgraph + * @returns {flatbuffers.Offset} + */ +tflite_schema.CallOptions.createCallOptions = function(builder, subgraph) { + tflite_schema.CallOptions.startCallOptions(builder); + tflite_schema.CallOptions.addSubgraph(builder, subgraph); + return tflite_schema.CallOptions.endCallOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.PadOptions = function() { + /** + * @type 
{flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.PadOptions} + */ +tflite_schema.PadOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PadOptions=} obj + * @returns {tflite_schema.PadOptions} + */ +tflite_schema.PadOptions.getRootAsPadOptions = function(bb, obj) { + return (obj || new tflite_schema.PadOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PadOptions=} obj + * @returns {tflite_schema.PadOptions} + */ +tflite_schema.PadOptions.getSizePrefixedRootAsPadOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.PadOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.PadOptions.startPadOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.PadOptions.endPadOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.PadOptions.createPadOptions = function(builder) { + tflite_schema.PadOptions.startPadOptions(builder); + return tflite_schema.PadOptions.endPadOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.PadV2Options = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.PadV2Options} + */ 
+tflite_schema.PadV2Options.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PadV2Options=} obj + * @returns {tflite_schema.PadV2Options} + */ +tflite_schema.PadV2Options.getRootAsPadV2Options = function(bb, obj) { + return (obj || new tflite_schema.PadV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PadV2Options=} obj + * @returns {tflite_schema.PadV2Options} + */ +tflite_schema.PadV2Options.getSizePrefixedRootAsPadV2Options = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.PadV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.PadV2Options.startPadV2Options = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.PadV2Options.endPadV2Options = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.PadV2Options.createPadV2Options = function(builder) { + tflite_schema.PadV2Options.startPadV2Options(builder); + return tflite_schema.PadV2Options.endPadV2Options(builder); +} + +/** + * @constructor + */ +tflite_schema.ReshapeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ReshapeOptions} + */ +tflite_schema.ReshapeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ReshapeOptions=} obj 
+ * @returns {tflite_schema.ReshapeOptions} + */ +tflite_schema.ReshapeOptions.getRootAsReshapeOptions = function(bb, obj) { + return (obj || new tflite_schema.ReshapeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ReshapeOptions=} obj + * @returns {tflite_schema.ReshapeOptions} + */ +tflite_schema.ReshapeOptions.getSizePrefixedRootAsReshapeOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ReshapeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.ReshapeOptions.prototype.newShape = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.ReshapeOptions.prototype.newShapeLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.ReshapeOptions.prototype.newShapeArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ReshapeOptions.startReshapeOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} newShapeOffset + */ +tflite_schema.ReshapeOptions.addNewShape = function(builder, newShapeOffset) { + builder.addFieldOffset(0, newShapeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReshapeOptions.createNewShapeVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.ReshapeOptions.startNewShapeVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReshapeOptions.endReshapeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} newShapeOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReshapeOptions.createReshapeOptions = function(builder, newShapeOffset) { + tflite_schema.ReshapeOptions.startReshapeOptions(builder); + tflite_schema.ReshapeOptions.addNewShape(builder, newShapeOffset); + return tflite_schema.ReshapeOptions.endReshapeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SpaceToBatchNDOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param 
{flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SpaceToBatchNDOptions} + */ +tflite_schema.SpaceToBatchNDOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SpaceToBatchNDOptions=} obj + * @returns {tflite_schema.SpaceToBatchNDOptions} + */ +tflite_schema.SpaceToBatchNDOptions.getRootAsSpaceToBatchNDOptions = function(bb, obj) { + return (obj || new tflite_schema.SpaceToBatchNDOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SpaceToBatchNDOptions=} obj + * @returns {tflite_schema.SpaceToBatchNDOptions} + */ +tflite_schema.SpaceToBatchNDOptions.getSizePrefixedRootAsSpaceToBatchNDOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SpaceToBatchNDOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SpaceToBatchNDOptions.startSpaceToBatchNDOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SpaceToBatchNDOptions.endSpaceToBatchNDOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SpaceToBatchNDOptions.createSpaceToBatchNDOptions = function(builder) { + tflite_schema.SpaceToBatchNDOptions.startSpaceToBatchNDOptions(builder); + return tflite_schema.SpaceToBatchNDOptions.endSpaceToBatchNDOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.BatchToSpaceNDOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param 
{flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.BatchToSpaceNDOptions} + */ +tflite_schema.BatchToSpaceNDOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BatchToSpaceNDOptions=} obj + * @returns {tflite_schema.BatchToSpaceNDOptions} + */ +tflite_schema.BatchToSpaceNDOptions.getRootAsBatchToSpaceNDOptions = function(bb, obj) { + return (obj || new tflite_schema.BatchToSpaceNDOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BatchToSpaceNDOptions=} obj + * @returns {tflite_schema.BatchToSpaceNDOptions} + */ +tflite_schema.BatchToSpaceNDOptions.getSizePrefixedRootAsBatchToSpaceNDOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.BatchToSpaceNDOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.BatchToSpaceNDOptions.startBatchToSpaceNDOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.BatchToSpaceNDOptions.endBatchToSpaceNDOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.BatchToSpaceNDOptions.createBatchToSpaceNDOptions = function(builder) { + tflite_schema.BatchToSpaceNDOptions.startBatchToSpaceNDOptions(builder); + return tflite_schema.BatchToSpaceNDOptions.endBatchToSpaceNDOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SkipGramOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param 
{flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SkipGramOptions} + */ +tflite_schema.SkipGramOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SkipGramOptions=} obj + * @returns {tflite_schema.SkipGramOptions} + */ +tflite_schema.SkipGramOptions.getRootAsSkipGramOptions = function(bb, obj) { + return (obj || new tflite_schema.SkipGramOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SkipGramOptions=} obj + * @returns {tflite_schema.SkipGramOptions} + */ +tflite_schema.SkipGramOptions.getSizePrefixedRootAsSkipGramOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SkipGramOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.SkipGramOptions.prototype.ngramSize = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.SkipGramOptions.prototype.maxSkipSize = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +tflite_schema.SkipGramOptions.prototype.includeAllNgrams = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SkipGramOptions.startSkipGramOptions = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} ngramSize + */ +tflite_schema.SkipGramOptions.addNgramSize = function(builder, ngramSize) { + builder.addFieldInt32(0, ngramSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} maxSkipSize + */ +tflite_schema.SkipGramOptions.addMaxSkipSize = function(builder, maxSkipSize) { + builder.addFieldInt32(1, maxSkipSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} includeAllNgrams + */ +tflite_schema.SkipGramOptions.addIncludeAllNgrams = function(builder, includeAllNgrams) { + builder.addFieldInt8(2, +includeAllNgrams, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SkipGramOptions.endSkipGramOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} ngramSize + * @param {number} maxSkipSize + * @param {boolean} includeAllNgrams + * @returns {flatbuffers.Offset} + */ +tflite_schema.SkipGramOptions.createSkipGramOptions = function(builder, ngramSize, maxSkipSize, includeAllNgrams) { + tflite_schema.SkipGramOptions.startSkipGramOptions(builder); + tflite_schema.SkipGramOptions.addNgramSize(builder, ngramSize); + tflite_schema.SkipGramOptions.addMaxSkipSize(builder, maxSkipSize); + tflite_schema.SkipGramOptions.addIncludeAllNgrams(builder, includeAllNgrams); + return tflite_schema.SkipGramOptions.endSkipGramOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SpaceToDepthOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param 
{flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SpaceToDepthOptions} + */ +tflite_schema.SpaceToDepthOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SpaceToDepthOptions=} obj + * @returns {tflite_schema.SpaceToDepthOptions} + */ +tflite_schema.SpaceToDepthOptions.getRootAsSpaceToDepthOptions = function(bb, obj) { + return (obj || new tflite_schema.SpaceToDepthOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SpaceToDepthOptions=} obj + * @returns {tflite_schema.SpaceToDepthOptions} + */ +tflite_schema.SpaceToDepthOptions.getSizePrefixedRootAsSpaceToDepthOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SpaceToDepthOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.SpaceToDepthOptions.prototype.blockSize = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SpaceToDepthOptions.startSpaceToDepthOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + */ +tflite_schema.SpaceToDepthOptions.addBlockSize = function(builder, blockSize) { + builder.addFieldInt32(0, blockSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SpaceToDepthOptions.endSpaceToDepthOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + * @returns {flatbuffers.Offset} + */ +tflite_schema.SpaceToDepthOptions.createSpaceToDepthOptions = function(builder, blockSize) { + tflite_schema.SpaceToDepthOptions.startSpaceToDepthOptions(builder); + tflite_schema.SpaceToDepthOptions.addBlockSize(builder, blockSize); + return tflite_schema.SpaceToDepthOptions.endSpaceToDepthOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.DepthToSpaceOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.DepthToSpaceOptions} + */ +tflite_schema.DepthToSpaceOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DepthToSpaceOptions=} obj + * @returns {tflite_schema.DepthToSpaceOptions} + */ +tflite_schema.DepthToSpaceOptions.getRootAsDepthToSpaceOptions = function(bb, obj) { + return (obj || new tflite_schema.DepthToSpaceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DepthToSpaceOptions=} obj + * @returns 
{tflite_schema.DepthToSpaceOptions} + */ +tflite_schema.DepthToSpaceOptions.getSizePrefixedRootAsDepthToSpaceOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.DepthToSpaceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.DepthToSpaceOptions.prototype.blockSize = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.DepthToSpaceOptions.startDepthToSpaceOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + */ +tflite_schema.DepthToSpaceOptions.addBlockSize = function(builder, blockSize) { + builder.addFieldInt32(0, blockSize, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.DepthToSpaceOptions.endDepthToSpaceOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} blockSize + * @returns {flatbuffers.Offset} + */ +tflite_schema.DepthToSpaceOptions.createDepthToSpaceOptions = function(builder, blockSize) { + tflite_schema.DepthToSpaceOptions.startDepthToSpaceOptions(builder); + tflite_schema.DepthToSpaceOptions.addBlockSize(builder, blockSize); + return tflite_schema.DepthToSpaceOptions.endDepthToSpaceOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SubOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SubOptions} + */ +tflite_schema.SubOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + 
* @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SubOptions=} obj + * @returns {tflite_schema.SubOptions} + */ +tflite_schema.SubOptions.getRootAsSubOptions = function(bb, obj) { + return (obj || new tflite_schema.SubOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SubOptions=} obj + * @returns {tflite_schema.SubOptions} + */ +tflite_schema.SubOptions.getSizePrefixedRootAsSubOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SubOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.SubOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SubOptions.startSubOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.SubOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubOptions.endSubOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubOptions.createSubOptions = function(builder, fusedActivationFunction) { + 
tflite_schema.SubOptions.startSubOptions(builder); + tflite_schema.SubOptions.addFusedActivationFunction(builder, fusedActivationFunction); + return tflite_schema.SubOptions.endSubOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.DivOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.DivOptions} + */ +tflite_schema.DivOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DivOptions=} obj + * @returns {tflite_schema.DivOptions} + */ +tflite_schema.DivOptions.getRootAsDivOptions = function(bb, obj) { + return (obj || new tflite_schema.DivOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DivOptions=} obj + * @returns {tflite_schema.DivOptions} + */ +tflite_schema.DivOptions.getSizePrefixedRootAsDivOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.DivOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.ActivationFunctionType} + */ +tflite_schema.DivOptions.prototype.fusedActivationFunction = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {tflite_schema.ActivationFunctionType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.ActivationFunctionType.NONE; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.DivOptions.startDivOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + */ +tflite_schema.DivOptions.addFusedActivationFunction = function(builder, fusedActivationFunction) { + builder.addFieldInt8(0, fusedActivationFunction, tflite_schema.ActivationFunctionType.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.DivOptions.endDivOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.ActivationFunctionType} fusedActivationFunction + * @returns {flatbuffers.Offset} + */ +tflite_schema.DivOptions.createDivOptions = function(builder, fusedActivationFunction) { + tflite_schema.DivOptions.startDivOptions(builder); + tflite_schema.DivOptions.addFusedActivationFunction(builder, fusedActivationFunction); + return tflite_schema.DivOptions.endDivOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.TopKV2Options = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.TopKV2Options} + */ +tflite_schema.TopKV2Options.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TopKV2Options=} obj + * @returns {tflite_schema.TopKV2Options} + */ +tflite_schema.TopKV2Options.getRootAsTopKV2Options = function(bb, obj) { + return (obj || new 
tflite_schema.TopKV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TopKV2Options=} obj + * @returns {tflite_schema.TopKV2Options} + */ +tflite_schema.TopKV2Options.getSizePrefixedRootAsTopKV2Options = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.TopKV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.TopKV2Options.startTopKV2Options = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.TopKV2Options.endTopKV2Options = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.TopKV2Options.createTopKV2Options = function(builder) { + tflite_schema.TopKV2Options.startTopKV2Options(builder); + return tflite_schema.TopKV2Options.endTopKV2Options(builder); +} + +/** + * @constructor + */ +tflite_schema.EmbeddingLookupSparseOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.EmbeddingLookupSparseOptions} + */ +tflite_schema.EmbeddingLookupSparseOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.EmbeddingLookupSparseOptions=} obj + * @returns {tflite_schema.EmbeddingLookupSparseOptions} + */ +tflite_schema.EmbeddingLookupSparseOptions.getRootAsEmbeddingLookupSparseOptions = function(bb, obj) { + return (obj || new tflite_schema.EmbeddingLookupSparseOptions).__init(bb.readInt32(bb.position()) + 
bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.EmbeddingLookupSparseOptions=} obj + * @returns {tflite_schema.EmbeddingLookupSparseOptions} + */ +tflite_schema.EmbeddingLookupSparseOptions.getSizePrefixedRootAsEmbeddingLookupSparseOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.EmbeddingLookupSparseOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.CombinerType} + */ +tflite_schema.EmbeddingLookupSparseOptions.prototype.combiner = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.CombinerType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.CombinerType.SUM; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.EmbeddingLookupSparseOptions.startEmbeddingLookupSparseOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.CombinerType} combiner + */ +tflite_schema.EmbeddingLookupSparseOptions.addCombiner = function(builder, combiner) { + builder.addFieldInt8(0, combiner, tflite_schema.CombinerType.SUM); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.EmbeddingLookupSparseOptions.endEmbeddingLookupSparseOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.CombinerType} combiner + * @returns {flatbuffers.Offset} + */ +tflite_schema.EmbeddingLookupSparseOptions.createEmbeddingLookupSparseOptions = function(builder, combiner) { + tflite_schema.EmbeddingLookupSparseOptions.startEmbeddingLookupSparseOptions(builder); + tflite_schema.EmbeddingLookupSparseOptions.addCombiner(builder, combiner); + return 
tflite_schema.EmbeddingLookupSparseOptions.endEmbeddingLookupSparseOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.GatherOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.GatherOptions} + */ +tflite_schema.GatherOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.GatherOptions=} obj + * @returns {tflite_schema.GatherOptions} + */ +tflite_schema.GatherOptions.getRootAsGatherOptions = function(bb, obj) { + return (obj || new tflite_schema.GatherOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.GatherOptions=} obj + * @returns {tflite_schema.GatherOptions} + */ +tflite_schema.GatherOptions.getSizePrefixedRootAsGatherOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.GatherOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.GatherOptions.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.GatherOptions.startGatherOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +tflite_schema.GatherOptions.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.GatherOptions.endGatherOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +tflite_schema.GatherOptions.createGatherOptions = function(builder, axis) { + tflite_schema.GatherOptions.startGatherOptions(builder); + tflite_schema.GatherOptions.addAxis(builder, axis); + return tflite_schema.GatherOptions.endGatherOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.TransposeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.TransposeOptions} + */ +tflite_schema.TransposeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TransposeOptions=} obj + * @returns {tflite_schema.TransposeOptions} + */ +tflite_schema.TransposeOptions.getRootAsTransposeOptions = function(bb, obj) { + return (obj || new tflite_schema.TransposeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TransposeOptions=} obj + * @returns {tflite_schema.TransposeOptions} + */ +tflite_schema.TransposeOptions.getSizePrefixedRootAsTransposeOptions = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.TransposeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.TransposeOptions.startTransposeOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.TransposeOptions.endTransposeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.TransposeOptions.createTransposeOptions = function(builder) { + tflite_schema.TransposeOptions.startTransposeOptions(builder); + return tflite_schema.TransposeOptions.endTransposeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ExpOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ExpOptions} + */ +tflite_schema.ExpOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ExpOptions=} obj + * @returns {tflite_schema.ExpOptions} + */ +tflite_schema.ExpOptions.getRootAsExpOptions = function(bb, obj) { + return (obj || new tflite_schema.ExpOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ExpOptions=} obj + * @returns {tflite_schema.ExpOptions} + */ +tflite_schema.ExpOptions.getSizePrefixedRootAsExpOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ExpOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param 
{flatbuffers.Builder} builder + */ +tflite_schema.ExpOptions.startExpOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ExpOptions.endExpOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ExpOptions.createExpOptions = function(builder) { + tflite_schema.ExpOptions.startExpOptions(builder); + return tflite_schema.ExpOptions.endExpOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.CosOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.CosOptions} + */ +tflite_schema.CosOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CosOptions=} obj + * @returns {tflite_schema.CosOptions} + */ +tflite_schema.CosOptions.getRootAsCosOptions = function(bb, obj) { + return (obj || new tflite_schema.CosOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CosOptions=} obj + * @returns {tflite_schema.CosOptions} + */ +tflite_schema.CosOptions.getSizePrefixedRootAsCosOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.CosOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.CosOptions.startCosOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.CosOptions.endCosOptions = 
function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.CosOptions.createCosOptions = function(builder) { + tflite_schema.CosOptions.startCosOptions(builder); + return tflite_schema.CosOptions.endCosOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ReducerOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ReducerOptions} + */ +tflite_schema.ReducerOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ReducerOptions=} obj + * @returns {tflite_schema.ReducerOptions} + */ +tflite_schema.ReducerOptions.getRootAsReducerOptions = function(bb, obj) { + return (obj || new tflite_schema.ReducerOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ReducerOptions=} obj + * @returns {tflite_schema.ReducerOptions} + */ +tflite_schema.ReducerOptions.getSizePrefixedRootAsReducerOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ReducerOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +tflite_schema.ReducerOptions.prototype.keepDims = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ReducerOptions.startReducerOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepDims + */ +tflite_schema.ReducerOptions.addKeepDims = function(builder, keepDims) { + builder.addFieldInt8(0, +keepDims, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReducerOptions.endReducerOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} keepDims + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReducerOptions.createReducerOptions = function(builder, keepDims) { + tflite_schema.ReducerOptions.startReducerOptions(builder); + tflite_schema.ReducerOptions.addKeepDims(builder, keepDims); + return tflite_schema.ReducerOptions.endReducerOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SqueezeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SqueezeOptions} + */ +tflite_schema.SqueezeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SqueezeOptions=} obj + * @returns {tflite_schema.SqueezeOptions} + */ +tflite_schema.SqueezeOptions.getRootAsSqueezeOptions = function(bb, obj) { + return (obj || new tflite_schema.SqueezeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SqueezeOptions=} obj + * @returns {tflite_schema.SqueezeOptions} + */ +tflite_schema.SqueezeOptions.getSizePrefixedRootAsSqueezeOptions = 
function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SqueezeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.SqueezeOptions.prototype.squeezeDims = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.SqueezeOptions.prototype.squeezeDimsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.SqueezeOptions.prototype.squeezeDimsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SqueezeOptions.startSqueezeOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} squeezeDimsOffset + */ +tflite_schema.SqueezeOptions.addSqueezeDims = function(builder, squeezeDimsOffset) { + builder.addFieldOffset(0, squeezeDimsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SqueezeOptions.createSqueezeDimsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SqueezeOptions.startSqueezeDimsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); 
+}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SqueezeOptions.endSqueezeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} squeezeDimsOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.SqueezeOptions.createSqueezeOptions = function(builder, squeezeDimsOffset) { + tflite_schema.SqueezeOptions.startSqueezeOptions(builder); + tflite_schema.SqueezeOptions.addSqueezeDims(builder, squeezeDimsOffset); + return tflite_schema.SqueezeOptions.endSqueezeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SplitOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SplitOptions} + */ +tflite_schema.SplitOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SplitOptions=} obj + * @returns {tflite_schema.SplitOptions} + */ +tflite_schema.SplitOptions.getRootAsSplitOptions = function(bb, obj) { + return (obj || new tflite_schema.SplitOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SplitOptions=} obj + * @returns {tflite_schema.SplitOptions} + */ +tflite_schema.SplitOptions.getSizePrefixedRootAsSplitOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SplitOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.SplitOptions.prototype.numSplits = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SplitOptions.startSplitOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numSplits + */ +tflite_schema.SplitOptions.addNumSplits = function(builder, numSplits) { + builder.addFieldInt32(0, numSplits, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SplitOptions.endSplitOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numSplits + * @returns {flatbuffers.Offset} + */ +tflite_schema.SplitOptions.createSplitOptions = function(builder, numSplits) { + tflite_schema.SplitOptions.startSplitOptions(builder); + tflite_schema.SplitOptions.addNumSplits(builder, numSplits); + return tflite_schema.SplitOptions.endSplitOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SplitVOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SplitVOptions} + */ +tflite_schema.SplitVOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SplitVOptions=} obj + * @returns {tflite_schema.SplitVOptions} + */ +tflite_schema.SplitVOptions.getRootAsSplitVOptions = function(bb, obj) { + return (obj || new tflite_schema.SplitVOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SplitVOptions=} obj + * @returns {tflite_schema.SplitVOptions} + */ +tflite_schema.SplitVOptions.getSizePrefixedRootAsSplitVOptions = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SplitVOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.SplitVOptions.prototype.numSplits = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SplitVOptions.startSplitVOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numSplits + */ +tflite_schema.SplitVOptions.addNumSplits = function(builder, numSplits) { + builder.addFieldInt32(0, numSplits, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SplitVOptions.endSplitVOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numSplits + * @returns {flatbuffers.Offset} + */ +tflite_schema.SplitVOptions.createSplitVOptions = function(builder, numSplits) { + tflite_schema.SplitVOptions.startSplitVOptions(builder); + tflite_schema.SplitVOptions.addNumSplits(builder, numSplits); + return tflite_schema.SplitVOptions.endSplitVOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.StridedSliceOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.StridedSliceOptions} + */ +tflite_schema.StridedSliceOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.StridedSliceOptions=} obj + * @returns {tflite_schema.StridedSliceOptions} + */ 
+tflite_schema.StridedSliceOptions.getRootAsStridedSliceOptions = function(bb, obj) { + return (obj || new tflite_schema.StridedSliceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.StridedSliceOptions=} obj + * @returns {tflite_schema.StridedSliceOptions} + */ +tflite_schema.StridedSliceOptions.getSizePrefixedRootAsStridedSliceOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.StridedSliceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.StridedSliceOptions.prototype.beginMask = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.StridedSliceOptions.prototype.endMask = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.StridedSliceOptions.prototype.ellipsisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.StridedSliceOptions.prototype.newAxisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.StridedSliceOptions.prototype.shrinkAxisMask = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.StridedSliceOptions.startStridedSliceOptions = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beginMask + */ +tflite_schema.StridedSliceOptions.addBeginMask = function(builder, beginMask) { + builder.addFieldInt32(0, beginMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} endMask + */ +tflite_schema.StridedSliceOptions.addEndMask = function(builder, endMask) { + builder.addFieldInt32(1, endMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} ellipsisMask + */ +tflite_schema.StridedSliceOptions.addEllipsisMask = function(builder, ellipsisMask) { + builder.addFieldInt32(2, ellipsisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} newAxisMask + */ +tflite_schema.StridedSliceOptions.addNewAxisMask = function(builder, newAxisMask) { + builder.addFieldInt32(3, newAxisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} shrinkAxisMask + */ +tflite_schema.StridedSliceOptions.addShrinkAxisMask = function(builder, shrinkAxisMask) { + builder.addFieldInt32(4, shrinkAxisMask, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.StridedSliceOptions.endStridedSliceOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} beginMask + * @param {number} endMask + * @param {number} ellipsisMask + * @param {number} newAxisMask + * @param {number} shrinkAxisMask + * @returns {flatbuffers.Offset} + */ +tflite_schema.StridedSliceOptions.createStridedSliceOptions = function(builder, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask) { + tflite_schema.StridedSliceOptions.startStridedSliceOptions(builder); + 
tflite_schema.StridedSliceOptions.addBeginMask(builder, beginMask); + tflite_schema.StridedSliceOptions.addEndMask(builder, endMask); + tflite_schema.StridedSliceOptions.addEllipsisMask(builder, ellipsisMask); + tflite_schema.StridedSliceOptions.addNewAxisMask(builder, newAxisMask); + tflite_schema.StridedSliceOptions.addShrinkAxisMask(builder, shrinkAxisMask); + return tflite_schema.StridedSliceOptions.endStridedSliceOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LogSoftmaxOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LogSoftmaxOptions} + */ +tflite_schema.LogSoftmaxOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogSoftmaxOptions=} obj + * @returns {tflite_schema.LogSoftmaxOptions} + */ +tflite_schema.LogSoftmaxOptions.getRootAsLogSoftmaxOptions = function(bb, obj) { + return (obj || new tflite_schema.LogSoftmaxOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogSoftmaxOptions=} obj + * @returns {tflite_schema.LogSoftmaxOptions} + */ +tflite_schema.LogSoftmaxOptions.getSizePrefixedRootAsLogSoftmaxOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LogSoftmaxOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LogSoftmaxOptions.startLogSoftmaxOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogSoftmaxOptions.endLogSoftmaxOptions = function(builder) { + var offset = 
builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogSoftmaxOptions.createLogSoftmaxOptions = function(builder) { + tflite_schema.LogSoftmaxOptions.startLogSoftmaxOptions(builder); + return tflite_schema.LogSoftmaxOptions.endLogSoftmaxOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.CastOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.CastOptions} + */ +tflite_schema.CastOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CastOptions=} obj + * @returns {tflite_schema.CastOptions} + */ +tflite_schema.CastOptions.getRootAsCastOptions = function(bb, obj) { + return (obj || new tflite_schema.CastOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.CastOptions=} obj + * @returns {tflite_schema.CastOptions} + */ +tflite_schema.CastOptions.getSizePrefixedRootAsCastOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.CastOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.TensorType} + */ +tflite_schema.CastOptions.prototype.inDataType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.TensorType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.TensorType.FLOAT32; +}; + +/** + * @returns {tflite_schema.TensorType} + */ +tflite_schema.CastOptions.prototype.outDataType = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
/** @type {tflite_schema.TensorType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.TensorType.FLOAT32; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.CastOptions.startCastOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} inDataType + */ +tflite_schema.CastOptions.addInDataType = function(builder, inDataType) { + builder.addFieldInt8(0, inDataType, tflite_schema.TensorType.FLOAT32); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} outDataType + */ +tflite_schema.CastOptions.addOutDataType = function(builder, outDataType) { + builder.addFieldInt8(1, outDataType, tflite_schema.TensorType.FLOAT32); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.CastOptions.endCastOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} inDataType + * @param {tflite_schema.TensorType} outDataType + * @returns {flatbuffers.Offset} + */ +tflite_schema.CastOptions.createCastOptions = function(builder, inDataType, outDataType) { + tflite_schema.CastOptions.startCastOptions(builder); + tflite_schema.CastOptions.addInDataType(builder, inDataType); + tflite_schema.CastOptions.addOutDataType(builder, outDataType); + return tflite_schema.CastOptions.endCastOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.DequantizeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.DequantizeOptions} + */ +tflite_schema.DequantizeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} 
bb + * @param {tflite_schema.DequantizeOptions=} obj + * @returns {tflite_schema.DequantizeOptions} + */ +tflite_schema.DequantizeOptions.getRootAsDequantizeOptions = function(bb, obj) { + return (obj || new tflite_schema.DequantizeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DequantizeOptions=} obj + * @returns {tflite_schema.DequantizeOptions} + */ +tflite_schema.DequantizeOptions.getSizePrefixedRootAsDequantizeOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.DequantizeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.DequantizeOptions.startDequantizeOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.DequantizeOptions.endDequantizeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.DequantizeOptions.createDequantizeOptions = function(builder) { + tflite_schema.DequantizeOptions.startDequantizeOptions(builder); + return tflite_schema.DequantizeOptions.endDequantizeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.MaximumMinimumOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.MaximumMinimumOptions} + */ +tflite_schema.MaximumMinimumOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MaximumMinimumOptions=} obj + * @returns 
{tflite_schema.MaximumMinimumOptions} + */ +tflite_schema.MaximumMinimumOptions.getRootAsMaximumMinimumOptions = function(bb, obj) { + return (obj || new tflite_schema.MaximumMinimumOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MaximumMinimumOptions=} obj + * @returns {tflite_schema.MaximumMinimumOptions} + */ +tflite_schema.MaximumMinimumOptions.getSizePrefixedRootAsMaximumMinimumOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.MaximumMinimumOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.MaximumMinimumOptions.startMaximumMinimumOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MaximumMinimumOptions.endMaximumMinimumOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MaximumMinimumOptions.createMaximumMinimumOptions = function(builder) { + tflite_schema.MaximumMinimumOptions.startMaximumMinimumOptions(builder); + return tflite_schema.MaximumMinimumOptions.endMaximumMinimumOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.TileOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.TileOptions} + */ +tflite_schema.TileOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TileOptions=} obj + * @returns {tflite_schema.TileOptions} + */ 
+tflite_schema.TileOptions.getRootAsTileOptions = function(bb, obj) { + return (obj || new tflite_schema.TileOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TileOptions=} obj + * @returns {tflite_schema.TileOptions} + */ +tflite_schema.TileOptions.getSizePrefixedRootAsTileOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.TileOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.TileOptions.startTileOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.TileOptions.endTileOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.TileOptions.createTileOptions = function(builder) { + tflite_schema.TileOptions.startTileOptions(builder); + return tflite_schema.TileOptions.endTileOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ArgMaxOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ArgMaxOptions} + */ +tflite_schema.ArgMaxOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ArgMaxOptions=} obj + * @returns {tflite_schema.ArgMaxOptions} + */ +tflite_schema.ArgMaxOptions.getRootAsArgMaxOptions = function(bb, obj) { + return (obj || new tflite_schema.ArgMaxOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb 
+ * @param {tflite_schema.ArgMaxOptions=} obj + * @returns {tflite_schema.ArgMaxOptions} + */ +tflite_schema.ArgMaxOptions.getSizePrefixedRootAsArgMaxOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ArgMaxOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.TensorType} + */ +tflite_schema.ArgMaxOptions.prototype.outputType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.TensorType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.TensorType.FLOAT32; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ArgMaxOptions.startArgMaxOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} outputType + */ +tflite_schema.ArgMaxOptions.addOutputType = function(builder, outputType) { + builder.addFieldInt8(0, outputType, tflite_schema.TensorType.FLOAT32); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ArgMaxOptions.endArgMaxOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} outputType + * @returns {flatbuffers.Offset} + */ +tflite_schema.ArgMaxOptions.createArgMaxOptions = function(builder, outputType) { + tflite_schema.ArgMaxOptions.startArgMaxOptions(builder); + tflite_schema.ArgMaxOptions.addOutputType(builder, outputType); + return tflite_schema.ArgMaxOptions.endArgMaxOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ArgMinOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ArgMinOptions} + 
*/ +tflite_schema.ArgMinOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ArgMinOptions=} obj + * @returns {tflite_schema.ArgMinOptions} + */ +tflite_schema.ArgMinOptions.getRootAsArgMinOptions = function(bb, obj) { + return (obj || new tflite_schema.ArgMinOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ArgMinOptions=} obj + * @returns {tflite_schema.ArgMinOptions} + */ +tflite_schema.ArgMinOptions.getSizePrefixedRootAsArgMinOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ArgMinOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.TensorType} + */ +tflite_schema.ArgMinOptions.prototype.outputType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {tflite_schema.TensorType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.TensorType.FLOAT32; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ArgMinOptions.startArgMinOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} outputType + */ +tflite_schema.ArgMinOptions.addOutputType = function(builder, outputType) { + builder.addFieldInt8(0, outputType, tflite_schema.TensorType.FLOAT32); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ArgMinOptions.endArgMinOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} outputType + * @returns {flatbuffers.Offset} + */ +tflite_schema.ArgMinOptions.createArgMinOptions = function(builder, outputType) { + tflite_schema.ArgMinOptions.startArgMinOptions(builder); + tflite_schema.ArgMinOptions.addOutputType(builder, outputType); + return tflite_schema.ArgMinOptions.endArgMinOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.GreaterOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.GreaterOptions} + */ +tflite_schema.GreaterOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.GreaterOptions=} obj + * @returns {tflite_schema.GreaterOptions} + */ +tflite_schema.GreaterOptions.getRootAsGreaterOptions = function(bb, obj) { + return (obj || new tflite_schema.GreaterOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{tflite_schema.GreaterOptions=} obj + * @returns {tflite_schema.GreaterOptions} + */ +tflite_schema.GreaterOptions.getSizePrefixedRootAsGreaterOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.GreaterOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.GreaterOptions.startGreaterOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.GreaterOptions.endGreaterOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.GreaterOptions.createGreaterOptions = function(builder) { + tflite_schema.GreaterOptions.startGreaterOptions(builder); + return tflite_schema.GreaterOptions.endGreaterOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.GreaterEqualOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.GreaterEqualOptions} + */ +tflite_schema.GreaterEqualOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.GreaterEqualOptions=} obj + * @returns {tflite_schema.GreaterEqualOptions} + */ +tflite_schema.GreaterEqualOptions.getRootAsGreaterEqualOptions = function(bb, obj) { + return (obj || new tflite_schema.GreaterEqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.GreaterEqualOptions=} obj + * @returns {tflite_schema.GreaterEqualOptions} + */ 
+tflite_schema.GreaterEqualOptions.getSizePrefixedRootAsGreaterEqualOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.GreaterEqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.GreaterEqualOptions.startGreaterEqualOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.GreaterEqualOptions.endGreaterEqualOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.GreaterEqualOptions.createGreaterEqualOptions = function(builder) { + tflite_schema.GreaterEqualOptions.startGreaterEqualOptions(builder); + return tflite_schema.GreaterEqualOptions.endGreaterEqualOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LessOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LessOptions} + */ +tflite_schema.LessOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LessOptions=} obj + * @returns {tflite_schema.LessOptions} + */ +tflite_schema.LessOptions.getRootAsLessOptions = function(bb, obj) { + return (obj || new tflite_schema.LessOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LessOptions=} obj + * @returns {tflite_schema.LessOptions} + */ +tflite_schema.LessOptions.getSizePrefixedRootAsLessOptions = function(bb, obj) { + bb.setPosition(bb.position() + 
flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LessOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LessOptions.startLessOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LessOptions.endLessOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LessOptions.createLessOptions = function(builder) { + tflite_schema.LessOptions.startLessOptions(builder); + return tflite_schema.LessOptions.endLessOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LessEqualOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LessEqualOptions} + */ +tflite_schema.LessEqualOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LessEqualOptions=} obj + * @returns {tflite_schema.LessEqualOptions} + */ +tflite_schema.LessEqualOptions.getRootAsLessEqualOptions = function(bb, obj) { + return (obj || new tflite_schema.LessEqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LessEqualOptions=} obj + * @returns {tflite_schema.LessEqualOptions} + */ +tflite_schema.LessEqualOptions.getSizePrefixedRootAsLessEqualOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LessEqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param 
{flatbuffers.Builder} builder + */ +tflite_schema.LessEqualOptions.startLessEqualOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LessEqualOptions.endLessEqualOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LessEqualOptions.createLessEqualOptions = function(builder) { + tflite_schema.LessEqualOptions.startLessEqualOptions(builder); + return tflite_schema.LessEqualOptions.endLessEqualOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.NegOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.NegOptions} + */ +tflite_schema.NegOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NegOptions=} obj + * @returns {tflite_schema.NegOptions} + */ +tflite_schema.NegOptions.getRootAsNegOptions = function(bb, obj) { + return (obj || new tflite_schema.NegOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NegOptions=} obj + * @returns {tflite_schema.NegOptions} + */ +tflite_schema.NegOptions.getSizePrefixedRootAsNegOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.NegOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.NegOptions.startNegOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns 
{flatbuffers.Offset} + */ +tflite_schema.NegOptions.endNegOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.NegOptions.createNegOptions = function(builder) { + tflite_schema.NegOptions.startNegOptions(builder); + return tflite_schema.NegOptions.endNegOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SelectOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SelectOptions} + */ +tflite_schema.SelectOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SelectOptions=} obj + * @returns {tflite_schema.SelectOptions} + */ +tflite_schema.SelectOptions.getRootAsSelectOptions = function(bb, obj) { + return (obj || new tflite_schema.SelectOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SelectOptions=} obj + * @returns {tflite_schema.SelectOptions} + */ +tflite_schema.SelectOptions.getSizePrefixedRootAsSelectOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SelectOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SelectOptions.startSelectOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SelectOptions.endSelectOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns 
{flatbuffers.Offset} + */ +tflite_schema.SelectOptions.createSelectOptions = function(builder) { + tflite_schema.SelectOptions.startSelectOptions(builder); + return tflite_schema.SelectOptions.endSelectOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SliceOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SliceOptions} + */ +tflite_schema.SliceOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SliceOptions=} obj + * @returns {tflite_schema.SliceOptions} + */ +tflite_schema.SliceOptions.getRootAsSliceOptions = function(bb, obj) { + return (obj || new tflite_schema.SliceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SliceOptions=} obj + * @returns {tflite_schema.SliceOptions} + */ +tflite_schema.SliceOptions.getSizePrefixedRootAsSliceOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SliceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SliceOptions.startSliceOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SliceOptions.endSliceOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SliceOptions.createSliceOptions = function(builder) { + tflite_schema.SliceOptions.startSliceOptions(builder); + return 
tflite_schema.SliceOptions.endSliceOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.TransposeConvOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.TransposeConvOptions} + */ +tflite_schema.TransposeConvOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TransposeConvOptions=} obj + * @returns {tflite_schema.TransposeConvOptions} + */ +tflite_schema.TransposeConvOptions.getRootAsTransposeConvOptions = function(bb, obj) { + return (obj || new tflite_schema.TransposeConvOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.TransposeConvOptions=} obj + * @returns {tflite_schema.TransposeConvOptions} + */ +tflite_schema.TransposeConvOptions.getSizePrefixedRootAsTransposeConvOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.TransposeConvOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.Padding} + */ +tflite_schema.TransposeConvOptions.prototype.padding = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.Padding} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.Padding.SAME; +}; + +/** + * @returns {number} + */ +tflite_schema.TransposeConvOptions.prototype.strideW = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.TransposeConvOptions.prototype.strideH = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.TransposeConvOptions.startTransposeConvOptions = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + */ +tflite_schema.TransposeConvOptions.addPadding = function(builder, padding) { + builder.addFieldInt8(0, padding, tflite_schema.Padding.SAME); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideW + */ +tflite_schema.TransposeConvOptions.addStrideW = function(builder, strideW) { + builder.addFieldInt32(1, strideW, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} strideH + */ +tflite_schema.TransposeConvOptions.addStrideH = function(builder, strideH) { + builder.addFieldInt32(2, strideH, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.TransposeConvOptions.endTransposeConvOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.Padding} padding + * @param {number} strideW + * @param {number} strideH + * @returns {flatbuffers.Offset} + */ +tflite_schema.TransposeConvOptions.createTransposeConvOptions = function(builder, padding, strideW, strideH) { + tflite_schema.TransposeConvOptions.startTransposeConvOptions(builder); + tflite_schema.TransposeConvOptions.addPadding(builder, padding); + tflite_schema.TransposeConvOptions.addStrideW(builder, strideW); + tflite_schema.TransposeConvOptions.addStrideH(builder, strideH); + return tflite_schema.TransposeConvOptions.endTransposeConvOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ExpandDimsOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param 
{flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ExpandDimsOptions} + */ +tflite_schema.ExpandDimsOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ExpandDimsOptions=} obj + * @returns {tflite_schema.ExpandDimsOptions} + */ +tflite_schema.ExpandDimsOptions.getRootAsExpandDimsOptions = function(bb, obj) { + return (obj || new tflite_schema.ExpandDimsOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ExpandDimsOptions=} obj + * @returns {tflite_schema.ExpandDimsOptions} + */ +tflite_schema.ExpandDimsOptions.getSizePrefixedRootAsExpandDimsOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ExpandDimsOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ExpandDimsOptions.startExpandDimsOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ExpandDimsOptions.endExpandDimsOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ExpandDimsOptions.createExpandDimsOptions = function(builder) { + tflite_schema.ExpandDimsOptions.startExpandDimsOptions(builder); + return tflite_schema.ExpandDimsOptions.endExpandDimsOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SparseToDenseOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SparseToDenseOptions} + */ 
+tflite_schema.SparseToDenseOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SparseToDenseOptions=} obj + * @returns {tflite_schema.SparseToDenseOptions} + */ +tflite_schema.SparseToDenseOptions.getRootAsSparseToDenseOptions = function(bb, obj) { + return (obj || new tflite_schema.SparseToDenseOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SparseToDenseOptions=} obj + * @returns {tflite_schema.SparseToDenseOptions} + */ +tflite_schema.SparseToDenseOptions.getSizePrefixedRootAsSparseToDenseOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SparseToDenseOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +tflite_schema.SparseToDenseOptions.prototype.validateIndices = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
!!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SparseToDenseOptions.startSparseToDenseOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} validateIndices + */ +tflite_schema.SparseToDenseOptions.addValidateIndices = function(builder, validateIndices) { + builder.addFieldInt8(0, +validateIndices, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SparseToDenseOptions.endSparseToDenseOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} validateIndices + * @returns {flatbuffers.Offset} + */ +tflite_schema.SparseToDenseOptions.createSparseToDenseOptions = function(builder, validateIndices) { + tflite_schema.SparseToDenseOptions.startSparseToDenseOptions(builder); + tflite_schema.SparseToDenseOptions.addValidateIndices(builder, validateIndices); + return tflite_schema.SparseToDenseOptions.endSparseToDenseOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.EqualOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.EqualOptions} + */ +tflite_schema.EqualOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.EqualOptions=} obj + * @returns {tflite_schema.EqualOptions} + */ +tflite_schema.EqualOptions.getRootAsEqualOptions = function(bb, obj) { + return (obj || new tflite_schema.EqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.EqualOptions=} obj + * @returns 
{tflite_schema.EqualOptions} + */ +tflite_schema.EqualOptions.getSizePrefixedRootAsEqualOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.EqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.EqualOptions.startEqualOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.EqualOptions.endEqualOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.EqualOptions.createEqualOptions = function(builder) { + tflite_schema.EqualOptions.startEqualOptions(builder); + return tflite_schema.EqualOptions.endEqualOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.NotEqualOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.NotEqualOptions} + */ +tflite_schema.NotEqualOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NotEqualOptions=} obj + * @returns {tflite_schema.NotEqualOptions} + */ +tflite_schema.NotEqualOptions.getRootAsNotEqualOptions = function(bb, obj) { + return (obj || new tflite_schema.NotEqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NotEqualOptions=} obj + * @returns {tflite_schema.NotEqualOptions} + */ +tflite_schema.NotEqualOptions.getSizePrefixedRootAsNotEqualOptions = function(bb, obj) { + bb.setPosition(bb.position() + 
flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.NotEqualOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.NotEqualOptions.startNotEqualOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.NotEqualOptions.endNotEqualOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.NotEqualOptions.createNotEqualOptions = function(builder) { + tflite_schema.NotEqualOptions.startNotEqualOptions(builder); + return tflite_schema.NotEqualOptions.endNotEqualOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ShapeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ShapeOptions} + */ +tflite_schema.ShapeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ShapeOptions=} obj + * @returns {tflite_schema.ShapeOptions} + */ +tflite_schema.ShapeOptions.getRootAsShapeOptions = function(bb, obj) { + return (obj || new tflite_schema.ShapeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ShapeOptions=} obj + * @returns {tflite_schema.ShapeOptions} + */ +tflite_schema.ShapeOptions.getSizePrefixedRootAsShapeOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ShapeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns 
{tflite_schema.TensorType} + */ +tflite_schema.ShapeOptions.prototype.outType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.TensorType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.TensorType.FLOAT32; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ShapeOptions.startShapeOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} outType + */ +tflite_schema.ShapeOptions.addOutType = function(builder, outType) { + builder.addFieldInt8(0, outType, tflite_schema.TensorType.FLOAT32); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ShapeOptions.endShapeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} outType + * @returns {flatbuffers.Offset} + */ +tflite_schema.ShapeOptions.createShapeOptions = function(builder, outType) { + tflite_schema.ShapeOptions.startShapeOptions(builder); + tflite_schema.ShapeOptions.addOutType(builder, outType); + return tflite_schema.ShapeOptions.endShapeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.RankOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.RankOptions} + */ +tflite_schema.RankOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.RankOptions=} obj + * @returns {tflite_schema.RankOptions} + */ +tflite_schema.RankOptions.getRootAsRankOptions = function(bb, obj) { + return (obj || new tflite_schema.RankOptions).__init(bb.readInt32(bb.position()) + 
bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.RankOptions=} obj + * @returns {tflite_schema.RankOptions} + */ +tflite_schema.RankOptions.getSizePrefixedRootAsRankOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.RankOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.RankOptions.startRankOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.RankOptions.endRankOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.RankOptions.createRankOptions = function(builder) { + tflite_schema.RankOptions.startRankOptions(builder); + return tflite_schema.RankOptions.endRankOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.PowOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.PowOptions} + */ +tflite_schema.PowOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PowOptions=} obj + * @returns {tflite_schema.PowOptions} + */ +tflite_schema.PowOptions.getRootAsPowOptions = function(bb, obj) { + return (obj || new tflite_schema.PowOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PowOptions=} obj + * @returns {tflite_schema.PowOptions} + */ +tflite_schema.PowOptions.getSizePrefixedRootAsPowOptions = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.PowOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.PowOptions.startPowOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.PowOptions.endPowOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.PowOptions.createPowOptions = function(builder) { + tflite_schema.PowOptions.startPowOptions(builder); + return tflite_schema.PowOptions.endPowOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.FakeQuantOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.FakeQuantOptions} + */ +tflite_schema.FakeQuantOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FakeQuantOptions=} obj + * @returns {tflite_schema.FakeQuantOptions} + */ +tflite_schema.FakeQuantOptions.getRootAsFakeQuantOptions = function(bb, obj) { + return (obj || new tflite_schema.FakeQuantOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FakeQuantOptions=} obj + * @returns {tflite_schema.FakeQuantOptions} + */ +tflite_schema.FakeQuantOptions.getSizePrefixedRootAsFakeQuantOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.FakeQuantOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * 
@returns {number} + */ +tflite_schema.FakeQuantOptions.prototype.min = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +tflite_schema.FakeQuantOptions.prototype.max = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @returns {number} + */ +tflite_schema.FakeQuantOptions.prototype.numBits = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {boolean} + */ +tflite_schema.FakeQuantOptions.prototype.narrowRange = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.FakeQuantOptions.startFakeQuantOptions = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} min + */ +tflite_schema.FakeQuantOptions.addMin = function(builder, min) { + builder.addFieldFloat32(0, min, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} max + */ +tflite_schema.FakeQuantOptions.addMax = function(builder, max) { + builder.addFieldFloat32(1, max, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numBits + */ +tflite_schema.FakeQuantOptions.addNumBits = function(builder, numBits) { + builder.addFieldInt32(2, numBits, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} narrowRange + */ +tflite_schema.FakeQuantOptions.addNarrowRange = function(builder, narrowRange) { + builder.addFieldInt8(3, +narrowRange, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FakeQuantOptions.endFakeQuantOptions = function(builder) { + var offset = 
builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} min + * @param {number} max + * @param {number} numBits + * @param {boolean} narrowRange + * @returns {flatbuffers.Offset} + */ +tflite_schema.FakeQuantOptions.createFakeQuantOptions = function(builder, min, max, numBits, narrowRange) { + tflite_schema.FakeQuantOptions.startFakeQuantOptions(builder); + tflite_schema.FakeQuantOptions.addMin(builder, min); + tflite_schema.FakeQuantOptions.addMax(builder, max); + tflite_schema.FakeQuantOptions.addNumBits(builder, numBits); + tflite_schema.FakeQuantOptions.addNarrowRange(builder, narrowRange); + return tflite_schema.FakeQuantOptions.endFakeQuantOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.PackOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.PackOptions} + */ +tflite_schema.PackOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PackOptions=} obj + * @returns {tflite_schema.PackOptions} + */ +tflite_schema.PackOptions.getRootAsPackOptions = function(bb, obj) { + return (obj || new tflite_schema.PackOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.PackOptions=} obj + * @returns {tflite_schema.PackOptions} + */ +tflite_schema.PackOptions.getSizePrefixedRootAsPackOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.PackOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.PackOptions.prototype.valuesCount = function() { + var offset = 
this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.PackOptions.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.PackOptions.startPackOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} valuesCount + */ +tflite_schema.PackOptions.addValuesCount = function(builder, valuesCount) { + builder.addFieldInt32(0, valuesCount, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +tflite_schema.PackOptions.addAxis = function(builder, axis) { + builder.addFieldInt32(1, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.PackOptions.endPackOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} valuesCount + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +tflite_schema.PackOptions.createPackOptions = function(builder, valuesCount, axis) { + tflite_schema.PackOptions.startPackOptions(builder); + tflite_schema.PackOptions.addValuesCount(builder, valuesCount); + tflite_schema.PackOptions.addAxis(builder, axis); + return tflite_schema.PackOptions.endPackOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LogicalOrOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LogicalOrOptions} + */ +tflite_schema.LogicalOrOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogicalOrOptions=} obj + * @returns {tflite_schema.LogicalOrOptions} + */ +tflite_schema.LogicalOrOptions.getRootAsLogicalOrOptions = function(bb, obj) { + return (obj || new tflite_schema.LogicalOrOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogicalOrOptions=} obj + * @returns {tflite_schema.LogicalOrOptions} + */ +tflite_schema.LogicalOrOptions.getSizePrefixedRootAsLogicalOrOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LogicalOrOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LogicalOrOptions.startLogicalOrOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogicalOrOptions.endLogicalOrOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogicalOrOptions.createLogicalOrOptions = function(builder) { + tflite_schema.LogicalOrOptions.startLogicalOrOptions(builder); + return tflite_schema.LogicalOrOptions.endLogicalOrOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.OneHotOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.OneHotOptions} + */ +tflite_schema.OneHotOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.OneHotOptions=} obj + * @returns {tflite_schema.OneHotOptions} + */ 
+tflite_schema.OneHotOptions.getRootAsOneHotOptions = function(bb, obj) { + return (obj || new tflite_schema.OneHotOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.OneHotOptions=} obj + * @returns {tflite_schema.OneHotOptions} + */ +tflite_schema.OneHotOptions.getSizePrefixedRootAsOneHotOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.OneHotOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.OneHotOptions.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.OneHotOptions.startOneHotOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +tflite_schema.OneHotOptions.addAxis = function(builder, axis) { + builder.addFieldInt32(0, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.OneHotOptions.endOneHotOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +tflite_schema.OneHotOptions.createOneHotOptions = function(builder, axis) { + tflite_schema.OneHotOptions.startOneHotOptions(builder); + tflite_schema.OneHotOptions.addAxis(builder, axis); + return tflite_schema.OneHotOptions.endOneHotOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.AbsOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns 
{tflite_schema.AbsOptions} + */ +tflite_schema.AbsOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.AbsOptions=} obj + * @returns {tflite_schema.AbsOptions} + */ +tflite_schema.AbsOptions.getRootAsAbsOptions = function(bb, obj) { + return (obj || new tflite_schema.AbsOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.AbsOptions=} obj + * @returns {tflite_schema.AbsOptions} + */ +tflite_schema.AbsOptions.getSizePrefixedRootAsAbsOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.AbsOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.AbsOptions.startAbsOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.AbsOptions.endAbsOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.AbsOptions.createAbsOptions = function(builder) { + tflite_schema.AbsOptions.startAbsOptions(builder); + return tflite_schema.AbsOptions.endAbsOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.HardSwishOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.HardSwishOptions} + */ +tflite_schema.HardSwishOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.HardSwishOptions=} obj + 
* @returns {tflite_schema.HardSwishOptions} + */ +tflite_schema.HardSwishOptions.getRootAsHardSwishOptions = function(bb, obj) { + return (obj || new tflite_schema.HardSwishOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.HardSwishOptions=} obj + * @returns {tflite_schema.HardSwishOptions} + */ +tflite_schema.HardSwishOptions.getSizePrefixedRootAsHardSwishOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.HardSwishOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.HardSwishOptions.startHardSwishOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.HardSwishOptions.endHardSwishOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.HardSwishOptions.createHardSwishOptions = function(builder) { + tflite_schema.HardSwishOptions.startHardSwishOptions(builder); + return tflite_schema.HardSwishOptions.endHardSwishOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LogicalAndOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LogicalAndOptions} + */ +tflite_schema.LogicalAndOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogicalAndOptions=} obj + * @returns {tflite_schema.LogicalAndOptions} + */ +tflite_schema.LogicalAndOptions.getRootAsLogicalAndOptions = 
function(bb, obj) { + return (obj || new tflite_schema.LogicalAndOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogicalAndOptions=} obj + * @returns {tflite_schema.LogicalAndOptions} + */ +tflite_schema.LogicalAndOptions.getSizePrefixedRootAsLogicalAndOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LogicalAndOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LogicalAndOptions.startLogicalAndOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogicalAndOptions.endLogicalAndOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogicalAndOptions.createLogicalAndOptions = function(builder) { + tflite_schema.LogicalAndOptions.startLogicalAndOptions(builder); + return tflite_schema.LogicalAndOptions.endLogicalAndOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LogicalNotOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LogicalNotOptions} + */ +tflite_schema.LogicalNotOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogicalNotOptions=} obj + * @returns {tflite_schema.LogicalNotOptions} + */ +tflite_schema.LogicalNotOptions.getRootAsLogicalNotOptions = function(bb, obj) { + return (obj || new 
tflite_schema.LogicalNotOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LogicalNotOptions=} obj + * @returns {tflite_schema.LogicalNotOptions} + */ +tflite_schema.LogicalNotOptions.getSizePrefixedRootAsLogicalNotOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LogicalNotOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LogicalNotOptions.startLogicalNotOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogicalNotOptions.endLogicalNotOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LogicalNotOptions.createLogicalNotOptions = function(builder) { + tflite_schema.LogicalNotOptions.startLogicalNotOptions(builder); + return tflite_schema.LogicalNotOptions.endLogicalNotOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.UnpackOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.UnpackOptions} + */ +tflite_schema.UnpackOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.UnpackOptions=} obj + * @returns {tflite_schema.UnpackOptions} + */ +tflite_schema.UnpackOptions.getRootAsUnpackOptions = function(bb, obj) { + return (obj || new tflite_schema.UnpackOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {tflite_schema.UnpackOptions=} obj + * @returns {tflite_schema.UnpackOptions} + */ +tflite_schema.UnpackOptions.getSizePrefixedRootAsUnpackOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.UnpackOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.UnpackOptions.prototype.num = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.UnpackOptions.prototype.axis = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.UnpackOptions.startUnpackOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} num + */ +tflite_schema.UnpackOptions.addNum = function(builder, num) { + builder.addFieldInt32(0, num, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} axis + */ +tflite_schema.UnpackOptions.addAxis = function(builder, axis) { + builder.addFieldInt32(1, axis, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.UnpackOptions.endUnpackOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} num + * @param {number} axis + * @returns {flatbuffers.Offset} + */ +tflite_schema.UnpackOptions.createUnpackOptions = function(builder, num, axis) { + tflite_schema.UnpackOptions.startUnpackOptions(builder); + tflite_schema.UnpackOptions.addNum(builder, num); + tflite_schema.UnpackOptions.addAxis(builder, axis); + return tflite_schema.UnpackOptions.endUnpackOptions(builder); +} + +/** + * 
@constructor + */ +tflite_schema.FloorDivOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.FloorDivOptions} + */ +tflite_schema.FloorDivOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FloorDivOptions=} obj + * @returns {tflite_schema.FloorDivOptions} + */ +tflite_schema.FloorDivOptions.getRootAsFloorDivOptions = function(bb, obj) { + return (obj || new tflite_schema.FloorDivOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FloorDivOptions=} obj + * @returns {tflite_schema.FloorDivOptions} + */ +tflite_schema.FloorDivOptions.getSizePrefixedRootAsFloorDivOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.FloorDivOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.FloorDivOptions.startFloorDivOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FloorDivOptions.endFloorDivOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FloorDivOptions.createFloorDivOptions = function(builder) { + tflite_schema.FloorDivOptions.startFloorDivOptions(builder); + return tflite_schema.FloorDivOptions.endFloorDivOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SquareOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type 
{number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SquareOptions} + */ +tflite_schema.SquareOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SquareOptions=} obj + * @returns {tflite_schema.SquareOptions} + */ +tflite_schema.SquareOptions.getRootAsSquareOptions = function(bb, obj) { + return (obj || new tflite_schema.SquareOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SquareOptions=} obj + * @returns {tflite_schema.SquareOptions} + */ +tflite_schema.SquareOptions.getSizePrefixedRootAsSquareOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SquareOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SquareOptions.startSquareOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SquareOptions.endSquareOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SquareOptions.createSquareOptions = function(builder) { + tflite_schema.SquareOptions.startSquareOptions(builder); + return tflite_schema.SquareOptions.endSquareOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ZerosLikeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ZerosLikeOptions} + */ 
+tflite_schema.ZerosLikeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ZerosLikeOptions=} obj + * @returns {tflite_schema.ZerosLikeOptions} + */ +tflite_schema.ZerosLikeOptions.getRootAsZerosLikeOptions = function(bb, obj) { + return (obj || new tflite_schema.ZerosLikeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ZerosLikeOptions=} obj + * @returns {tflite_schema.ZerosLikeOptions} + */ +tflite_schema.ZerosLikeOptions.getSizePrefixedRootAsZerosLikeOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ZerosLikeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ZerosLikeOptions.startZerosLikeOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ZerosLikeOptions.endZerosLikeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ZerosLikeOptions.createZerosLikeOptions = function(builder) { + tflite_schema.ZerosLikeOptions.startZerosLikeOptions(builder); + return tflite_schema.ZerosLikeOptions.endZerosLikeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.FillOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.FillOptions} + */ +tflite_schema.FillOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FillOptions=} obj + * @returns {tflite_schema.FillOptions} + */ +tflite_schema.FillOptions.getRootAsFillOptions = function(bb, obj) { + return (obj || new tflite_schema.FillOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FillOptions=} obj + * @returns {tflite_schema.FillOptions} + */ +tflite_schema.FillOptions.getSizePrefixedRootAsFillOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.FillOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.FillOptions.startFillOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FillOptions.endFillOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FillOptions.createFillOptions = function(builder) { + tflite_schema.FillOptions.startFillOptions(builder); + return tflite_schema.FillOptions.endFillOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.FloorModOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.FloorModOptions} + */ +tflite_schema.FloorModOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FloorModOptions=} obj + * @returns {tflite_schema.FloorModOptions} + */ +tflite_schema.FloorModOptions.getRootAsFloorModOptions = function(bb, obj) { + return (obj || new 
tflite_schema.FloorModOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.FloorModOptions=} obj + * @returns {tflite_schema.FloorModOptions} + */ +tflite_schema.FloorModOptions.getSizePrefixedRootAsFloorModOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.FloorModOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.FloorModOptions.startFloorModOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FloorModOptions.endFloorModOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.FloorModOptions.createFloorModOptions = function(builder) { + tflite_schema.FloorModOptions.startFloorModOptions(builder); + return tflite_schema.FloorModOptions.endFloorModOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.RangeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.RangeOptions} + */ +tflite_schema.RangeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.RangeOptions=} obj + * @returns {tflite_schema.RangeOptions} + */ +tflite_schema.RangeOptions.getRootAsRangeOptions = function(bb, obj) { + return (obj || new tflite_schema.RangeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{tflite_schema.RangeOptions=} obj + * @returns {tflite_schema.RangeOptions} + */ +tflite_schema.RangeOptions.getSizePrefixedRootAsRangeOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.RangeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.RangeOptions.startRangeOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.RangeOptions.endRangeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.RangeOptions.createRangeOptions = function(builder) { + tflite_schema.RangeOptions.startRangeOptions(builder); + return tflite_schema.RangeOptions.endRangeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.LeakyReluOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.LeakyReluOptions} + */ +tflite_schema.LeakyReluOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LeakyReluOptions=} obj + * @returns {tflite_schema.LeakyReluOptions} + */ +tflite_schema.LeakyReluOptions.getRootAsLeakyReluOptions = function(bb, obj) { + return (obj || new tflite_schema.LeakyReluOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.LeakyReluOptions=} obj + * @returns {tflite_schema.LeakyReluOptions} + */ +tflite_schema.LeakyReluOptions.getSizePrefixedRootAsLeakyReluOptions = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.LeakyReluOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.LeakyReluOptions.prototype.alpha = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.LeakyReluOptions.startLeakyReluOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + */ +tflite_schema.LeakyReluOptions.addAlpha = function(builder, alpha) { + builder.addFieldFloat32(0, alpha, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.LeakyReluOptions.endLeakyReluOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} alpha + * @returns {flatbuffers.Offset} + */ +tflite_schema.LeakyReluOptions.createLeakyReluOptions = function(builder, alpha) { + tflite_schema.LeakyReluOptions.startLeakyReluOptions(builder); + tflite_schema.LeakyReluOptions.addAlpha(builder, alpha); + return tflite_schema.LeakyReluOptions.endLeakyReluOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SquaredDifferenceOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SquaredDifferenceOptions} + */ +tflite_schema.SquaredDifferenceOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SquaredDifferenceOptions=} obj + * @returns {tflite_schema.SquaredDifferenceOptions} + */ 
+tflite_schema.SquaredDifferenceOptions.getRootAsSquaredDifferenceOptions = function(bb, obj) { + return (obj || new tflite_schema.SquaredDifferenceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SquaredDifferenceOptions=} obj + * @returns {tflite_schema.SquaredDifferenceOptions} + */ +tflite_schema.SquaredDifferenceOptions.getSizePrefixedRootAsSquaredDifferenceOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SquaredDifferenceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SquaredDifferenceOptions.startSquaredDifferenceOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SquaredDifferenceOptions.endSquaredDifferenceOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SquaredDifferenceOptions.createSquaredDifferenceOptions = function(builder) { + tflite_schema.SquaredDifferenceOptions.startSquaredDifferenceOptions(builder); + return tflite_schema.SquaredDifferenceOptions.endSquaredDifferenceOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.MirrorPadOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.MirrorPadOptions} + */ +tflite_schema.MirrorPadOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MirrorPadOptions=} obj + * @returns 
{tflite_schema.MirrorPadOptions} + */ +tflite_schema.MirrorPadOptions.getRootAsMirrorPadOptions = function(bb, obj) { + return (obj || new tflite_schema.MirrorPadOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MirrorPadOptions=} obj + * @returns {tflite_schema.MirrorPadOptions} + */ +tflite_schema.MirrorPadOptions.getSizePrefixedRootAsMirrorPadOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.MirrorPadOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.MirrorPadMode} + */ +tflite_schema.MirrorPadOptions.prototype.mode = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.MirrorPadMode} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.MirrorPadMode.REFLECT; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.MirrorPadOptions.startMirrorPadOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.MirrorPadMode} mode + */ +tflite_schema.MirrorPadOptions.addMode = function(builder, mode) { + builder.addFieldInt8(0, mode, tflite_schema.MirrorPadMode.REFLECT); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MirrorPadOptions.endMirrorPadOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.MirrorPadMode} mode + * @returns {flatbuffers.Offset} + */ +tflite_schema.MirrorPadOptions.createMirrorPadOptions = function(builder, mode) { + tflite_schema.MirrorPadOptions.startMirrorPadOptions(builder); + tflite_schema.MirrorPadOptions.addMode(builder, mode); + return tflite_schema.MirrorPadOptions.endMirrorPadOptions(builder); +} + +/** + 
* @constructor + */ +tflite_schema.UniqueOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.UniqueOptions} + */ +tflite_schema.UniqueOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.UniqueOptions=} obj + * @returns {tflite_schema.UniqueOptions} + */ +tflite_schema.UniqueOptions.getRootAsUniqueOptions = function(bb, obj) { + return (obj || new tflite_schema.UniqueOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.UniqueOptions=} obj + * @returns {tflite_schema.UniqueOptions} + */ +tflite_schema.UniqueOptions.getSizePrefixedRootAsUniqueOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.UniqueOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.TensorType} + */ +tflite_schema.UniqueOptions.prototype.idxOutType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
/** @type {tflite_schema.TensorType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.TensorType.INT32; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.UniqueOptions.startUniqueOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} idxOutType + */ +tflite_schema.UniqueOptions.addIdxOutType = function(builder, idxOutType) { + builder.addFieldInt8(0, idxOutType, tflite_schema.TensorType.INT32); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.UniqueOptions.endUniqueOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.TensorType} idxOutType + * @returns {flatbuffers.Offset} + */ +tflite_schema.UniqueOptions.createUniqueOptions = function(builder, idxOutType) { + tflite_schema.UniqueOptions.startUniqueOptions(builder); + tflite_schema.UniqueOptions.addIdxOutType(builder, idxOutType); + return tflite_schema.UniqueOptions.endUniqueOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ReverseV2Options = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ReverseV2Options} + */ +tflite_schema.ReverseV2Options.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ReverseV2Options=} obj + * @returns {tflite_schema.ReverseV2Options} + */ +tflite_schema.ReverseV2Options.getRootAsReverseV2Options = function(bb, obj) { + return (obj || new tflite_schema.ReverseV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{tflite_schema.ReverseV2Options=} obj + * @returns {tflite_schema.ReverseV2Options} + */ +tflite_schema.ReverseV2Options.getSizePrefixedRootAsReverseV2Options = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ReverseV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ReverseV2Options.startReverseV2Options = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReverseV2Options.endReverseV2Options = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReverseV2Options.createReverseV2Options = function(builder) { + tflite_schema.ReverseV2Options.startReverseV2Options(builder); + return tflite_schema.ReverseV2Options.endReverseV2Options(builder); +} + +/** + * @constructor + */ +tflite_schema.AddNOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.AddNOptions} + */ +tflite_schema.AddNOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.AddNOptions=} obj + * @returns {tflite_schema.AddNOptions} + */ +tflite_schema.AddNOptions.getRootAsAddNOptions = function(bb, obj) { + return (obj || new tflite_schema.AddNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.AddNOptions=} obj + * @returns {tflite_schema.AddNOptions} + */ +tflite_schema.AddNOptions.getSizePrefixedRootAsAddNOptions = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.AddNOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.AddNOptions.startAddNOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.AddNOptions.endAddNOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.AddNOptions.createAddNOptions = function(builder) { + tflite_schema.AddNOptions.startAddNOptions(builder); + return tflite_schema.AddNOptions.endAddNOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.GatherNdOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.GatherNdOptions} + */ +tflite_schema.GatherNdOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.GatherNdOptions=} obj + * @returns {tflite_schema.GatherNdOptions} + */ +tflite_schema.GatherNdOptions.getRootAsGatherNdOptions = function(bb, obj) { + return (obj || new tflite_schema.GatherNdOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.GatherNdOptions=} obj + * @returns {tflite_schema.GatherNdOptions} + */ +tflite_schema.GatherNdOptions.getSizePrefixedRootAsGatherNdOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.GatherNdOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * 
@param {flatbuffers.Builder} builder + */ +tflite_schema.GatherNdOptions.startGatherNdOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.GatherNdOptions.endGatherNdOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.GatherNdOptions.createGatherNdOptions = function(builder) { + tflite_schema.GatherNdOptions.startGatherNdOptions(builder); + return tflite_schema.GatherNdOptions.endGatherNdOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.WhereOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.WhereOptions} + */ +tflite_schema.WhereOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.WhereOptions=} obj + * @returns {tflite_schema.WhereOptions} + */ +tflite_schema.WhereOptions.getRootAsWhereOptions = function(bb, obj) { + return (obj || new tflite_schema.WhereOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.WhereOptions=} obj + * @returns {tflite_schema.WhereOptions} + */ +tflite_schema.WhereOptions.getSizePrefixedRootAsWhereOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.WhereOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.WhereOptions.startWhereOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder 
+ * @returns {flatbuffers.Offset} + */ +tflite_schema.WhereOptions.endWhereOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.WhereOptions.createWhereOptions = function(builder) { + tflite_schema.WhereOptions.startWhereOptions(builder); + return tflite_schema.WhereOptions.endWhereOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.ReverseSequenceOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ReverseSequenceOptions} + */ +tflite_schema.ReverseSequenceOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ReverseSequenceOptions=} obj + * @returns {tflite_schema.ReverseSequenceOptions} + */ +tflite_schema.ReverseSequenceOptions.getRootAsReverseSequenceOptions = function(bb, obj) { + return (obj || new tflite_schema.ReverseSequenceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ReverseSequenceOptions=} obj + * @returns {tflite_schema.ReverseSequenceOptions} + */ +tflite_schema.ReverseSequenceOptions.getSizePrefixedRootAsReverseSequenceOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ReverseSequenceOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.ReverseSequenceOptions.prototype.seqDim = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.ReverseSequenceOptions.prototype.batchDim = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ReverseSequenceOptions.startReverseSequenceOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} seqDim + */ +tflite_schema.ReverseSequenceOptions.addSeqDim = function(builder, seqDim) { + builder.addFieldInt32(0, seqDim, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} batchDim + */ +tflite_schema.ReverseSequenceOptions.addBatchDim = function(builder, batchDim) { + builder.addFieldInt32(1, batchDim, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReverseSequenceOptions.endReverseSequenceOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} seqDim + * @param {number} batchDim + * @returns {flatbuffers.Offset} + */ +tflite_schema.ReverseSequenceOptions.createReverseSequenceOptions = function(builder, seqDim, batchDim) { + tflite_schema.ReverseSequenceOptions.startReverseSequenceOptions(builder); + tflite_schema.ReverseSequenceOptions.addSeqDim(builder, seqDim); + tflite_schema.ReverseSequenceOptions.addBatchDim(builder, batchDim); + return tflite_schema.ReverseSequenceOptions.endReverseSequenceOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.MatrixDiagOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.MatrixDiagOptions} + */ +tflite_schema.MatrixDiagOptions.prototype.__init = 
function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MatrixDiagOptions=} obj + * @returns {tflite_schema.MatrixDiagOptions} + */ +tflite_schema.MatrixDiagOptions.getRootAsMatrixDiagOptions = function(bb, obj) { + return (obj || new tflite_schema.MatrixDiagOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MatrixDiagOptions=} obj + * @returns {tflite_schema.MatrixDiagOptions} + */ +tflite_schema.MatrixDiagOptions.getSizePrefixedRootAsMatrixDiagOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.MatrixDiagOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.MatrixDiagOptions.startMatrixDiagOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MatrixDiagOptions.endMatrixDiagOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MatrixDiagOptions.createMatrixDiagOptions = function(builder) { + tflite_schema.MatrixDiagOptions.startMatrixDiagOptions(builder); + return tflite_schema.MatrixDiagOptions.endMatrixDiagOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.QuantizeOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.QuantizeOptions} + */ +tflite_schema.QuantizeOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {tflite_schema.QuantizeOptions=} obj + * @returns {tflite_schema.QuantizeOptions} + */ +tflite_schema.QuantizeOptions.getRootAsQuantizeOptions = function(bb, obj) { + return (obj || new tflite_schema.QuantizeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.QuantizeOptions=} obj + * @returns {tflite_schema.QuantizeOptions} + */ +tflite_schema.QuantizeOptions.getSizePrefixedRootAsQuantizeOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.QuantizeOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.QuantizeOptions.startQuantizeOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizeOptions.endQuantizeOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.QuantizeOptions.createQuantizeOptions = function(builder) { + tflite_schema.QuantizeOptions.startQuantizeOptions(builder); + return tflite_schema.QuantizeOptions.endQuantizeOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.MatrixSetDiagOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.MatrixSetDiagOptions} + */ +tflite_schema.MatrixSetDiagOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MatrixSetDiagOptions=} obj + * @returns {tflite_schema.MatrixSetDiagOptions} + 
*/ +tflite_schema.MatrixSetDiagOptions.getRootAsMatrixSetDiagOptions = function(bb, obj) { + return (obj || new tflite_schema.MatrixSetDiagOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.MatrixSetDiagOptions=} obj + * @returns {tflite_schema.MatrixSetDiagOptions} + */ +tflite_schema.MatrixSetDiagOptions.getSizePrefixedRootAsMatrixSetDiagOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.MatrixSetDiagOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.MatrixSetDiagOptions.startMatrixSetDiagOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MatrixSetDiagOptions.endMatrixSetDiagOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.MatrixSetDiagOptions.createMatrixSetDiagOptions = function(builder) { + tflite_schema.MatrixSetDiagOptions.startMatrixSetDiagOptions(builder); + return tflite_schema.MatrixSetDiagOptions.endMatrixSetDiagOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.IfOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.IfOptions} + */ +tflite_schema.IfOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.IfOptions=} obj + * @returns {tflite_schema.IfOptions} + */ +tflite_schema.IfOptions.getRootAsIfOptions = function(bb, obj) { + return (obj || 
new tflite_schema.IfOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.IfOptions=} obj + * @returns {tflite_schema.IfOptions} + */ +tflite_schema.IfOptions.getSizePrefixedRootAsIfOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.IfOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.IfOptions.prototype.thenSubgraphIndex = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.IfOptions.prototype.elseSubgraphIndex = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.IfOptions.startIfOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} thenSubgraphIndex + */ +tflite_schema.IfOptions.addThenSubgraphIndex = function(builder, thenSubgraphIndex) { + builder.addFieldInt32(0, thenSubgraphIndex, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} elseSubgraphIndex + */ +tflite_schema.IfOptions.addElseSubgraphIndex = function(builder, elseSubgraphIndex) { + builder.addFieldInt32(1, elseSubgraphIndex, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.IfOptions.endIfOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} thenSubgraphIndex + * @param {number} elseSubgraphIndex + * @returns {flatbuffers.Offset} + */ +tflite_schema.IfOptions.createIfOptions = function(builder, thenSubgraphIndex, elseSubgraphIndex) { + 
tflite_schema.IfOptions.startIfOptions(builder); + tflite_schema.IfOptions.addThenSubgraphIndex(builder, thenSubgraphIndex); + tflite_schema.IfOptions.addElseSubgraphIndex(builder, elseSubgraphIndex); + return tflite_schema.IfOptions.endIfOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.WhileOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.WhileOptions} + */ +tflite_schema.WhileOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.WhileOptions=} obj + * @returns {tflite_schema.WhileOptions} + */ +tflite_schema.WhileOptions.getRootAsWhileOptions = function(bb, obj) { + return (obj || new tflite_schema.WhileOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.WhileOptions=} obj + * @returns {tflite_schema.WhileOptions} + */ +tflite_schema.WhileOptions.getSizePrefixedRootAsWhileOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.WhileOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.WhileOptions.prototype.condSubgraphIndex = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.WhileOptions.prototype.bodySubgraphIndex = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.WhileOptions.startWhileOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} condSubgraphIndex + */ +tflite_schema.WhileOptions.addCondSubgraphIndex = function(builder, condSubgraphIndex) { + builder.addFieldInt32(0, condSubgraphIndex, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} bodySubgraphIndex + */ +tflite_schema.WhileOptions.addBodySubgraphIndex = function(builder, bodySubgraphIndex) { + builder.addFieldInt32(1, bodySubgraphIndex, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.WhileOptions.endWhileOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} condSubgraphIndex + * @param {number} bodySubgraphIndex + * @returns {flatbuffers.Offset} + */ +tflite_schema.WhileOptions.createWhileOptions = function(builder, condSubgraphIndex, bodySubgraphIndex) { + tflite_schema.WhileOptions.startWhileOptions(builder); + tflite_schema.WhileOptions.addCondSubgraphIndex(builder, condSubgraphIndex); + tflite_schema.WhileOptions.addBodySubgraphIndex(builder, bodySubgraphIndex); + return tflite_schema.WhileOptions.endWhileOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.NonMaxSuppressionV4Options = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.NonMaxSuppressionV4Options} + */ +tflite_schema.NonMaxSuppressionV4Options.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NonMaxSuppressionV4Options=} 
obj + * @returns {tflite_schema.NonMaxSuppressionV4Options} + */ +tflite_schema.NonMaxSuppressionV4Options.getRootAsNonMaxSuppressionV4Options = function(bb, obj) { + return (obj || new tflite_schema.NonMaxSuppressionV4Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NonMaxSuppressionV4Options=} obj + * @returns {tflite_schema.NonMaxSuppressionV4Options} + */ +tflite_schema.NonMaxSuppressionV4Options.getSizePrefixedRootAsNonMaxSuppressionV4Options = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.NonMaxSuppressionV4Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.NonMaxSuppressionV4Options.startNonMaxSuppressionV4Options = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.NonMaxSuppressionV4Options.endNonMaxSuppressionV4Options = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.NonMaxSuppressionV4Options.createNonMaxSuppressionV4Options = function(builder) { + tflite_schema.NonMaxSuppressionV4Options.startNonMaxSuppressionV4Options(builder); + return tflite_schema.NonMaxSuppressionV4Options.endNonMaxSuppressionV4Options(builder); +} + +/** + * @constructor + */ +tflite_schema.NonMaxSuppressionV5Options = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.NonMaxSuppressionV5Options} + */ +tflite_schema.NonMaxSuppressionV5Options.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; 
+}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NonMaxSuppressionV5Options=} obj + * @returns {tflite_schema.NonMaxSuppressionV5Options} + */ +tflite_schema.NonMaxSuppressionV5Options.getRootAsNonMaxSuppressionV5Options = function(bb, obj) { + return (obj || new tflite_schema.NonMaxSuppressionV5Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.NonMaxSuppressionV5Options=} obj + * @returns {tflite_schema.NonMaxSuppressionV5Options} + */ +tflite_schema.NonMaxSuppressionV5Options.getSizePrefixedRootAsNonMaxSuppressionV5Options = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.NonMaxSuppressionV5Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.NonMaxSuppressionV5Options.startNonMaxSuppressionV5Options = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.NonMaxSuppressionV5Options.endNonMaxSuppressionV5Options = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.NonMaxSuppressionV5Options.createNonMaxSuppressionV5Options = function(builder) { + tflite_schema.NonMaxSuppressionV5Options.startNonMaxSuppressionV5Options(builder); + return tflite_schema.NonMaxSuppressionV5Options.endNonMaxSuppressionV5Options(builder); +} + +/** + * @constructor + */ +tflite_schema.ScatterNdOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.ScatterNdOptions} + */ 
+tflite_schema.ScatterNdOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ScatterNdOptions=} obj + * @returns {tflite_schema.ScatterNdOptions} + */ +tflite_schema.ScatterNdOptions.getRootAsScatterNdOptions = function(bb, obj) { + return (obj || new tflite_schema.ScatterNdOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.ScatterNdOptions=} obj + * @returns {tflite_schema.ScatterNdOptions} + */ +tflite_schema.ScatterNdOptions.getSizePrefixedRootAsScatterNdOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.ScatterNdOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.ScatterNdOptions.startScatterNdOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ScatterNdOptions.endScatterNdOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.ScatterNdOptions.createScatterNdOptions = function(builder) { + tflite_schema.ScatterNdOptions.startScatterNdOptions(builder); + return tflite_schema.ScatterNdOptions.endScatterNdOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SelectV2Options = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SelectV2Options} + */ +tflite_schema.SelectV2Options.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + 
+/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SelectV2Options=} obj + * @returns {tflite_schema.SelectV2Options} + */ +tflite_schema.SelectV2Options.getRootAsSelectV2Options = function(bb, obj) { + return (obj || new tflite_schema.SelectV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SelectV2Options=} obj + * @returns {tflite_schema.SelectV2Options} + */ +tflite_schema.SelectV2Options.getSizePrefixedRootAsSelectV2Options = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SelectV2Options).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SelectV2Options.startSelectV2Options = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SelectV2Options.endSelectV2Options = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SelectV2Options.createSelectV2Options = function(builder) { + tflite_schema.SelectV2Options.startSelectV2Options(builder); + return tflite_schema.SelectV2Options.endSelectV2Options(builder); +} + +/** + * @constructor + */ +tflite_schema.DensifyOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.DensifyOptions} + */ +tflite_schema.DensifyOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DensifyOptions=} obj + * @returns {tflite_schema.DensifyOptions} + */ 
+tflite_schema.DensifyOptions.getRootAsDensifyOptions = function(bb, obj) { + return (obj || new tflite_schema.DensifyOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.DensifyOptions=} obj + * @returns {tflite_schema.DensifyOptions} + */ +tflite_schema.DensifyOptions.getSizePrefixedRootAsDensifyOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.DensifyOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.DensifyOptions.startDensifyOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.DensifyOptions.endDensifyOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.DensifyOptions.createDensifyOptions = function(builder) { + tflite_schema.DensifyOptions.startDensifyOptions(builder); + return tflite_schema.DensifyOptions.endDensifyOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.SegmentSumOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SegmentSumOptions} + */ +tflite_schema.SegmentSumOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SegmentSumOptions=} obj + * @returns {tflite_schema.SegmentSumOptions} + */ +tflite_schema.SegmentSumOptions.getRootAsSegmentSumOptions = function(bb, obj) { + return (obj || new 
tflite_schema.SegmentSumOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SegmentSumOptions=} obj + * @returns {tflite_schema.SegmentSumOptions} + */ +tflite_schema.SegmentSumOptions.getSizePrefixedRootAsSegmentSumOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SegmentSumOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SegmentSumOptions.startSegmentSumOptions = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SegmentSumOptions.endSegmentSumOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SegmentSumOptions.createSegmentSumOptions = function(builder) { + tflite_schema.SegmentSumOptions.startSegmentSumOptions(builder); + return tflite_schema.SegmentSumOptions.endSegmentSumOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.BatchMatMulOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.BatchMatMulOptions} + */ +tflite_schema.BatchMatMulOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BatchMatMulOptions=} obj + * @returns {tflite_schema.BatchMatMulOptions} + */ +tflite_schema.BatchMatMulOptions.getRootAsBatchMatMulOptions = function(bb, obj) { + return (obj || new tflite_schema.BatchMatMulOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; 
+ +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.BatchMatMulOptions=} obj + * @returns {tflite_schema.BatchMatMulOptions} + */ +tflite_schema.BatchMatMulOptions.getSizePrefixedRootAsBatchMatMulOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.BatchMatMulOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {boolean} + */ +tflite_schema.BatchMatMulOptions.prototype.adjX = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @returns {boolean} + */ +tflite_schema.BatchMatMulOptions.prototype.adjY = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? !!this.bb.readInt8(this.bb_pos + offset) : false; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.BatchMatMulOptions.startBatchMatMulOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} adjX + */ +tflite_schema.BatchMatMulOptions.addAdjX = function(builder, adjX) { + builder.addFieldInt8(0, +adjX, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} adjY + */ +tflite_schema.BatchMatMulOptions.addAdjY = function(builder, adjY) { + builder.addFieldInt8(1, +adjY, +false); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.BatchMatMulOptions.endBatchMatMulOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {boolean} adjX + * @param {boolean} adjY + * @returns {flatbuffers.Offset} + */ +tflite_schema.BatchMatMulOptions.createBatchMatMulOptions = function(builder, adjX, adjY) { + tflite_schema.BatchMatMulOptions.startBatchMatMulOptions(builder); + tflite_schema.BatchMatMulOptions.addAdjX(builder, 
adjX); + tflite_schema.BatchMatMulOptions.addAdjY(builder, adjY); + return tflite_schema.BatchMatMulOptions.endBatchMatMulOptions(builder); +} + +/** + * @constructor + */ +tflite_schema.OperatorCode = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.OperatorCode} + */ +tflite_schema.OperatorCode.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.OperatorCode=} obj + * @returns {tflite_schema.OperatorCode} + */ +tflite_schema.OperatorCode.getRootAsOperatorCode = function(bb, obj) { + return (obj || new tflite_schema.OperatorCode).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.OperatorCode=} obj + * @returns {tflite_schema.OperatorCode} + */ +tflite_schema.OperatorCode.getSizePrefixedRootAsOperatorCode = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.OperatorCode).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_schema.BuiltinOperator} + */ +tflite_schema.OperatorCode.prototype.builtinCode = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_schema.BuiltinOperator} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.BuiltinOperator.ADD; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_schema.OperatorCode.prototype.customCode = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.OperatorCode.prototype.version = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 1; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.OperatorCode.startOperatorCode = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.BuiltinOperator} builtinCode + */ +tflite_schema.OperatorCode.addBuiltinCode = function(builder, builtinCode) { + builder.addFieldInt8(0, builtinCode, tflite_schema.BuiltinOperator.ADD); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} customCodeOffset + */ +tflite_schema.OperatorCode.addCustomCode = function(builder, customCodeOffset) { + builder.addFieldOffset(1, customCodeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} version + */ +tflite_schema.OperatorCode.addVersion = function(builder, version) { + builder.addFieldInt32(2, version, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.OperatorCode.endOperatorCode = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.BuiltinOperator} builtinCode + * @param {flatbuffers.Offset} customCodeOffset + * @param {number} version + * @returns {flatbuffers.Offset} + */ +tflite_schema.OperatorCode.createOperatorCode = function(builder, builtinCode, customCodeOffset, version) { + tflite_schema.OperatorCode.startOperatorCode(builder); + tflite_schema.OperatorCode.addBuiltinCode(builder, builtinCode); + tflite_schema.OperatorCode.addCustomCode(builder, customCodeOffset); + tflite_schema.OperatorCode.addVersion(builder, version); + return tflite_schema.OperatorCode.endOperatorCode(builder); +} + +/** + * 
@constructor + */ +tflite_schema.Operator = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Operator} + */ +tflite_schema.Operator.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Operator=} obj + * @returns {tflite_schema.Operator} + */ +tflite_schema.Operator.getRootAsOperator = function(bb, obj) { + return (obj || new tflite_schema.Operator).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Operator=} obj + * @returns {tflite_schema.Operator} + */ +tflite_schema.Operator.getSizePrefixedRootAsOperator = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Operator).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_schema.Operator.prototype.opcodeIndex = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Operator.prototype.inputs = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Operator.prototype.inputsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.Operator.prototype.inputsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Operator.prototype.outputs = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Operator.prototype.outputsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.Operator.prototype.outputsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {tflite_schema.BuiltinOptions} + */ +tflite_schema.Operator.prototype.builtinOptionsType = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? /** @type {tflite_schema.BuiltinOptions} */ (this.bb.readUint8(this.bb_pos + offset)) : tflite_schema.BuiltinOptions.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +tflite_schema.Operator.prototype.builtinOptions = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Operator.prototype.customOptions = function(index) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
this.bb.readUint8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Operator.prototype.customOptionsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint8Array} + */ +tflite_schema.Operator.prototype.customOptionsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? new Uint8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {tflite_schema.CustomOptionsFormat} + */ +tflite_schema.Operator.prototype.customOptionsFormat = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? /** @type {tflite_schema.CustomOptionsFormat} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_schema.CustomOptionsFormat.FLEXBUFFERS; +}; + +/** + * @param {number} index + * @returns {boolean} + */ +tflite_schema.Operator.prototype.mutatingVariableInputs = function(index) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? !!this.bb.readInt8(this.bb.__vector(this.bb_pos + offset) + index) : false; +}; + +/** + * @returns {number} + */ +tflite_schema.Operator.prototype.mutatingVariableInputsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int8Array} + */ +tflite_schema.Operator.prototype.mutatingVariableInputsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
new Int8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Operator.prototype.intermediates = function(index) { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Operator.prototype.intermediatesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.Operator.prototype.intermediatesArray = function() { + var offset = this.bb.__offset(this.bb_pos, 20); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Operator.startOperator = function(builder) { + builder.startObject(9); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} opcodeIndex + */ +tflite_schema.Operator.addOpcodeIndex = function(builder, opcodeIndex) { + builder.addFieldInt32(0, opcodeIndex, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputsOffset + */ +tflite_schema.Operator.addInputs = function(builder, inputsOffset) { + builder.addFieldOffset(1, inputsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Operator.createInputsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ 
+tflite_schema.Operator.startInputsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputsOffset + */ +tflite_schema.Operator.addOutputs = function(builder, outputsOffset) { + builder.addFieldOffset(2, outputsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Operator.createOutputsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Operator.startOutputsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.BuiltinOptions} builtinOptionsType + */ +tflite_schema.Operator.addBuiltinOptionsType = function(builder, builtinOptionsType) { + builder.addFieldInt8(3, builtinOptionsType, tflite_schema.BuiltinOptions.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} builtinOptionsOffset + */ +tflite_schema.Operator.addBuiltinOptions = function(builder, builtinOptionsOffset) { + builder.addFieldOffset(4, builtinOptionsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} customOptionsOffset + */ +tflite_schema.Operator.addCustomOptions = function(builder, customOptionsOffset) { + builder.addFieldOffset(5, customOptionsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Operator.createCustomOptionsVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return 
builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Operator.startCustomOptionsVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_schema.CustomOptionsFormat} customOptionsFormat + */ +tflite_schema.Operator.addCustomOptionsFormat = function(builder, customOptionsFormat) { + builder.addFieldInt8(6, customOptionsFormat, tflite_schema.CustomOptionsFormat.FLEXBUFFERS); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} mutatingVariableInputsOffset + */ +tflite_schema.Operator.addMutatingVariableInputs = function(builder, mutatingVariableInputsOffset) { + builder.addFieldOffset(7, mutatingVariableInputsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Operator.createMutatingVariableInputsVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(+data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Operator.startMutatingVariableInputsVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} intermediatesOffset + */ +tflite_schema.Operator.addIntermediates = function(builder, intermediatesOffset) { + builder.addFieldOffset(8, intermediatesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Operator.createIntermediatesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * 
@param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Operator.startIntermediatesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Operator.endOperator = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} opcodeIndex + * @param {flatbuffers.Offset} inputsOffset + * @param {flatbuffers.Offset} outputsOffset + * @param {tflite_schema.BuiltinOptions} builtinOptionsType + * @param {flatbuffers.Offset} builtinOptionsOffset + * @param {flatbuffers.Offset} customOptionsOffset + * @param {tflite_schema.CustomOptionsFormat} customOptionsFormat + * @param {flatbuffers.Offset} mutatingVariableInputsOffset + * @param {flatbuffers.Offset} intermediatesOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.Operator.createOperator = function(builder, opcodeIndex, inputsOffset, outputsOffset, builtinOptionsType, builtinOptionsOffset, customOptionsOffset, customOptionsFormat, mutatingVariableInputsOffset, intermediatesOffset) { + tflite_schema.Operator.startOperator(builder); + tflite_schema.Operator.addOpcodeIndex(builder, opcodeIndex); + tflite_schema.Operator.addInputs(builder, inputsOffset); + tflite_schema.Operator.addOutputs(builder, outputsOffset); + tflite_schema.Operator.addBuiltinOptionsType(builder, builtinOptionsType); + tflite_schema.Operator.addBuiltinOptions(builder, builtinOptionsOffset); + tflite_schema.Operator.addCustomOptions(builder, customOptionsOffset); + tflite_schema.Operator.addCustomOptionsFormat(builder, customOptionsFormat); + tflite_schema.Operator.addMutatingVariableInputs(builder, mutatingVariableInputsOffset); + tflite_schema.Operator.addIntermediates(builder, intermediatesOffset); + return tflite_schema.Operator.endOperator(builder); +} + +/** + * @constructor + */ 
+tflite_schema.SubGraph = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.SubGraph} + */ +tflite_schema.SubGraph.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SubGraph=} obj + * @returns {tflite_schema.SubGraph} + */ +tflite_schema.SubGraph.getRootAsSubGraph = function(bb, obj) { + return (obj || new tflite_schema.SubGraph).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.SubGraph=} obj + * @returns {tflite_schema.SubGraph} + */ +tflite_schema.SubGraph.getSizePrefixedRootAsSubGraph = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.SubGraph).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @param {tflite_schema.Tensor=} obj + * @returns {tflite_schema.Tensor} + */ +tflite_schema.SubGraph.prototype.tensors = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? (obj || new tflite_schema.Tensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.SubGraph.prototype.tensorsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.SubGraph.prototype.inputs = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.SubGraph.prototype.inputsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.SubGraph.prototype.inputsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.SubGraph.prototype.outputs = function(index) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.SubGraph.prototype.outputsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.SubGraph.prototype.outputsArray = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @param {tflite_schema.Operator=} obj + * @returns {tflite_schema.Operator} + */ +tflite_schema.SubGraph.prototype.operators = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new tflite_schema.Operator).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.SubGraph.prototype.operatorsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_schema.SubGraph.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.SubGraph.startSubGraph = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} tensorsOffset + */ +tflite_schema.SubGraph.addTensors = function(builder, tensorsOffset) { + builder.addFieldOffset(0, tensorsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubGraph.createTensorsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SubGraph.startTensorsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputsOffset + */ +tflite_schema.SubGraph.addInputs = function(builder, inputsOffset) { + builder.addFieldOffset(1, inputsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubGraph.createInputsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SubGraph.startInputsVector = function(builder, numElems) { + 
builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputsOffset + */ +tflite_schema.SubGraph.addOutputs = function(builder, outputsOffset) { + builder.addFieldOffset(2, outputsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubGraph.createOutputsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SubGraph.startOutputsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} operatorsOffset + */ +tflite_schema.SubGraph.addOperators = function(builder, operatorsOffset) { + builder.addFieldOffset(3, operatorsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubGraph.createOperatorsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.SubGraph.startOperatorsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +tflite_schema.SubGraph.addName = function(builder, nameOffset) { + builder.addFieldOffset(4, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubGraph.endSubGraph = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + 
* @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} tensorsOffset + * @param {flatbuffers.Offset} inputsOffset + * @param {flatbuffers.Offset} outputsOffset + * @param {flatbuffers.Offset} operatorsOffset + * @param {flatbuffers.Offset} nameOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.SubGraph.createSubGraph = function(builder, tensorsOffset, inputsOffset, outputsOffset, operatorsOffset, nameOffset) { + tflite_schema.SubGraph.startSubGraph(builder); + tflite_schema.SubGraph.addTensors(builder, tensorsOffset); + tflite_schema.SubGraph.addInputs(builder, inputsOffset); + tflite_schema.SubGraph.addOutputs(builder, outputsOffset); + tflite_schema.SubGraph.addOperators(builder, operatorsOffset); + tflite_schema.SubGraph.addName(builder, nameOffset); + return tflite_schema.SubGraph.endSubGraph(builder); +} + +/** + * @constructor + */ +tflite_schema.Buffer = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Buffer} + */ +tflite_schema.Buffer.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Buffer=} obj + * @returns {tflite_schema.Buffer} + */ +tflite_schema.Buffer.getRootAsBuffer = function(bb, obj) { + return (obj || new tflite_schema.Buffer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Buffer=} obj + * @returns {tflite_schema.Buffer} + */ +tflite_schema.Buffer.getSizePrefixedRootAsBuffer = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Buffer).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ 
+tflite_schema.Buffer.prototype.data = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint8(this.bb.__vector(this.bb_pos + offset) + index) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Buffer.prototype.dataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint8Array} + */ +tflite_schema.Buffer.prototype.dataArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Uint8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Buffer.startBuffer = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + */ +tflite_schema.Buffer.addData = function(builder, dataOffset) { + builder.addFieldOffset(0, dataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Buffer.createDataVector = function(builder, data) { + builder.startVector(1, data.length, 1); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt8(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Buffer.startDataVector = function(builder, numElems) { + builder.startVector(1, numElems, 1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Buffer.endBuffer = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dataOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.Buffer.createBuffer = function(builder, 
dataOffset) { + tflite_schema.Buffer.startBuffer(builder); + tflite_schema.Buffer.addData(builder, dataOffset); + return tflite_schema.Buffer.endBuffer(builder); +} + +/** + * @constructor + */ +tflite_schema.Metadata = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Metadata} + */ +tflite_schema.Metadata.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Metadata=} obj + * @returns {tflite_schema.Metadata} + */ +tflite_schema.Metadata.getRootAsMetadata = function(bb, obj) { + return (obj || new tflite_schema.Metadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Metadata=} obj + * @returns {tflite_schema.Metadata} + */ +tflite_schema.Metadata.getSizePrefixedRootAsMetadata = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Metadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_schema.Metadata.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.Metadata.prototype.buffer = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Metadata.startMetadata = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +tflite_schema.Metadata.addName = function(builder, nameOffset) { + builder.addFieldOffset(0, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} buffer + */ +tflite_schema.Metadata.addBuffer = function(builder, buffer) { + builder.addFieldInt32(1, buffer, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Metadata.endMetadata = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + * @param {number} buffer + * @returns {flatbuffers.Offset} + */ +tflite_schema.Metadata.createMetadata = function(builder, nameOffset, buffer) { + tflite_schema.Metadata.startMetadata(builder); + tflite_schema.Metadata.addName(builder, nameOffset); + tflite_schema.Metadata.addBuffer(builder, buffer); + return tflite_schema.Metadata.endMetadata(builder); +} + +/** + * @constructor + */ +tflite_schema.Model = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_schema.Model} + */ +tflite_schema.Model.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_schema.Model=} obj + * @returns {tflite_schema.Model} + */ +tflite_schema.Model.getRootAsModel = function(bb, obj) { + return (obj || new tflite_schema.Model).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{tflite_schema.Model=} obj + * @returns {tflite_schema.Model} + */ +tflite_schema.Model.getSizePrefixedRootAsModel = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_schema.Model).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @returns {boolean} + */ +tflite_schema.Model.bufferHasIdentifier = function(bb) { + return bb.__has_identifier('TFL3'); +}; + +/** + * @returns {number} + */ +tflite_schema.Model.prototype.version = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {tflite_schema.OperatorCode=} obj + * @returns {tflite_schema.OperatorCode} + */ +tflite_schema.Model.prototype.operatorCodes = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? (obj || new tflite_schema.OperatorCode).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.Model.prototype.operatorCodesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {tflite_schema.SubGraph=} obj + * @returns {tflite_schema.SubGraph} + */ +tflite_schema.Model.prototype.subgraphs = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new tflite_schema.SubGraph).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.Model.prototype.subgraphsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_schema.Model.prototype.description = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @param {tflite_schema.Buffer=} obj + * @returns {tflite_schema.Buffer} + */ +tflite_schema.Model.prototype.buffers = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new tflite_schema.Buffer).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.Model.prototype.buffersLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_schema.Model.prototype.metadataBuffer = function(index) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.readInt32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_schema.Model.prototype.metadataBufferLength = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Int32Array} + */ +tflite_schema.Model.prototype.metadataBufferArray = function() { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? 
new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @param {tflite_schema.Metadata=} obj + * @returns {tflite_schema.Metadata} + */ +tflite_schema.Model.prototype.metadata = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new tflite_schema.Metadata).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_schema.Model.prototype.metadataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_schema.Model.startModel = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} version + */ +tflite_schema.Model.addVersion = function(builder, version) { + builder.addFieldInt32(0, version, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} operatorCodesOffset + */ +tflite_schema.Model.addOperatorCodes = function(builder, operatorCodesOffset) { + builder.addFieldOffset(1, operatorCodesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Model.createOperatorCodesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Model.startOperatorCodesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} subgraphsOffset + */ 
+tflite_schema.Model.addSubgraphs = function(builder, subgraphsOffset) { + builder.addFieldOffset(2, subgraphsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Model.createSubgraphsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Model.startSubgraphsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptionOffset + */ +tflite_schema.Model.addDescription = function(builder, descriptionOffset) { + builder.addFieldOffset(3, descriptionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} buffersOffset + */ +tflite_schema.Model.addBuffers = function(builder, buffersOffset) { + builder.addFieldOffset(4, buffersOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Model.createBuffersVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Model.startBuffersVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} metadataBufferOffset + */ +tflite_schema.Model.addMetadataBuffer = function(builder, metadataBufferOffset) { + builder.addFieldOffset(5, metadataBufferOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns 
{flatbuffers.Offset} + */ +tflite_schema.Model.createMetadataBufferVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Model.startMetadataBufferVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} metadataOffset + */ +tflite_schema.Model.addMetadata = function(builder, metadataOffset) { + builder.addFieldOffset(6, metadataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_schema.Model.createMetadataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_schema.Model.startMetadataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_schema.Model.endModel = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ +tflite_schema.Model.finishModelBuffer = function(builder, offset) { + builder.finish(offset, 'TFL3'); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ +tflite_schema.Model.finishSizePrefixedModelBuffer = function(builder, offset) { + builder.finish(offset, 'TFL3', true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} version + * @param {flatbuffers.Offset} operatorCodesOffset + * @param {flatbuffers.Offset} 
subgraphsOffset + * @param {flatbuffers.Offset} descriptionOffset + * @param {flatbuffers.Offset} buffersOffset + * @param {flatbuffers.Offset} metadataBufferOffset + * @param {flatbuffers.Offset} metadataOffset + * @returns {flatbuffers.Offset} + */ +tflite_schema.Model.createModel = function(builder, version, operatorCodesOffset, subgraphsOffset, descriptionOffset, buffersOffset, metadataBufferOffset, metadataOffset) { + tflite_schema.Model.startModel(builder); + tflite_schema.Model.addVersion(builder, version); + tflite_schema.Model.addOperatorCodes(builder, operatorCodesOffset); + tflite_schema.Model.addSubgraphs(builder, subgraphsOffset); + tflite_schema.Model.addDescription(builder, descriptionOffset); + tflite_schema.Model.addBuffers(builder, buffersOffset); + tflite_schema.Model.addMetadataBuffer(builder, metadataBufferOffset); + tflite_schema.Model.addMetadata(builder, metadataOffset); + return tflite_schema.Model.endModel(builder); +} + +// automatically generated by the FlatBuffers compiler, do not modify + +/** + * @const + * @namespace + */ +var tflite_metadata_schema = tflite_metadata_schema || {}; + +/** + * @enum {number} + */ +tflite_metadata_schema.AssociatedFileType = { + UNKNOWN: 0, + DESCRIPTIONS: 1, + TENSOR_AXIS_LABELS: 2, + TENSOR_VALUE_LABELS: 3, + TENSOR_AXIS_SCORE_CALIBRATION: 4 +}; + +/** + * @enum {string} + */ +tflite_metadata_schema.AssociatedFileTypeName = { + '0': 'UNKNOWN', + '1': 'DESCRIPTIONS', + '2': 'TENSOR_AXIS_LABELS', + '3': 'TENSOR_VALUE_LABELS', + '4': 'TENSOR_AXIS_SCORE_CALIBRATION' +}; + +/** + * @enum {number} + */ +tflite_metadata_schema.ColorSpaceType = { + UNKNOWN: 0, + RGB: 1, + GRAYSCALE: 2 +}; + +/** + * @enum {string} + */ +tflite_metadata_schema.ColorSpaceTypeName = { + '0': 'UNKNOWN', + '1': 'RGB', + '2': 'GRAYSCALE' +}; + +/** + * @enum {number} + */ +tflite_metadata_schema.BoundingBoxType = { + UNKNOWN: 0, + BOUNDARIES: 1, + UPPER_LEFT: 2, + CENTER: 3 +}; + +/** + * @enum {string} + */ 
+tflite_metadata_schema.BoundingBoxTypeName = { + '0': 'UNKNOWN', + '1': 'BOUNDARIES', + '2': 'UPPER_LEFT', + '3': 'CENTER' +}; + +/** + * @enum {number} + */ +tflite_metadata_schema.CoordinateType = { + RATIO: 0, + PIXEL: 1 +}; + +/** + * @enum {string} + */ +tflite_metadata_schema.CoordinateTypeName = { + '0': 'RATIO', + '1': 'PIXEL' +}; + +/** + * @enum {number} + */ +tflite_metadata_schema.ContentProperties = { + NONE: 0, + FeatureProperties: 1, + ImageProperties: 2, + BoundingBoxProperties: 3 +}; + +/** + * @enum {string} + */ +tflite_metadata_schema.ContentPropertiesName = { + '0': 'NONE', + '1': 'FeatureProperties', + '2': 'ImageProperties', + '3': 'BoundingBoxProperties' +}; + +/** + * @enum {number} + */ +tflite_metadata_schema.ScoreTransformationType = { + IDENTITY: 0, + LOG: 1, + INVERSE_LOGISTIC: 2 +}; + +/** + * @enum {string} + */ +tflite_metadata_schema.ScoreTransformationTypeName = { + '0': 'IDENTITY', + '1': 'LOG', + '2': 'INVERSE_LOGISTIC' +}; + +/** + * @enum {number} + */ +tflite_metadata_schema.ProcessUnitOptions = { + NONE: 0, + NormalizationOptions: 1, + ScoreCalibrationOptions: 2, + ScoreThresholdingOptions: 3 +}; + +/** + * @enum {string} + */ +tflite_metadata_schema.ProcessUnitOptionsName = { + '0': 'NONE', + '1': 'NormalizationOptions', + '2': 'ScoreCalibrationOptions', + '3': 'ScoreThresholdingOptions' +}; + +/** + * @constructor + */ +tflite_metadata_schema.AssociatedFile = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.AssociatedFile} + */ +tflite_metadata_schema.AssociatedFile.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.AssociatedFile=} obj + * @returns {tflite_metadata_schema.AssociatedFile} + */ 
+tflite_metadata_schema.AssociatedFile.getRootAsAssociatedFile = function(bb, obj) { + return (obj || new tflite_metadata_schema.AssociatedFile).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.AssociatedFile=} obj + * @returns {tflite_metadata_schema.AssociatedFile} + */ +tflite_metadata_schema.AssociatedFile.getSizePrefixedRootAsAssociatedFile = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.AssociatedFile).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.AssociatedFile.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.AssociatedFile.prototype.description = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @returns {tflite_metadata_schema.AssociatedFileType} + */ +tflite_metadata_schema.AssociatedFile.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {tflite_metadata_schema.AssociatedFileType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_metadata_schema.AssociatedFileType.UNKNOWN; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.AssociatedFile.prototype.locale = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.AssociatedFile.startAssociatedFile = function(builder) { + builder.startObject(4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +tflite_metadata_schema.AssociatedFile.addName = function(builder, nameOffset) { + builder.addFieldOffset(0, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptionOffset + */ +tflite_metadata_schema.AssociatedFile.addDescription = function(builder, descriptionOffset) { + builder.addFieldOffset(1, descriptionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.AssociatedFileType} type + */ +tflite_metadata_schema.AssociatedFile.addType = function(builder, type) { + builder.addFieldInt8(2, type, tflite_metadata_schema.AssociatedFileType.UNKNOWN); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} localeOffset + */ +tflite_metadata_schema.AssociatedFile.addLocale = function(builder, localeOffset) { + builder.addFieldOffset(3, localeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.AssociatedFile.endAssociatedFile = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + * @param {flatbuffers.Offset} descriptionOffset + * @param {tflite_metadata_schema.AssociatedFileType} type + * @param {flatbuffers.Offset} localeOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.AssociatedFile.createAssociatedFile = function(builder, nameOffset, descriptionOffset, type, localeOffset) { + tflite_metadata_schema.AssociatedFile.startAssociatedFile(builder); + tflite_metadata_schema.AssociatedFile.addName(builder, 
nameOffset); + tflite_metadata_schema.AssociatedFile.addDescription(builder, descriptionOffset); + tflite_metadata_schema.AssociatedFile.addType(builder, type); + tflite_metadata_schema.AssociatedFile.addLocale(builder, localeOffset); + return tflite_metadata_schema.AssociatedFile.endAssociatedFile(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.FeatureProperties = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.FeatureProperties} + */ +tflite_metadata_schema.FeatureProperties.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.FeatureProperties=} obj + * @returns {tflite_metadata_schema.FeatureProperties} + */ +tflite_metadata_schema.FeatureProperties.getRootAsFeatureProperties = function(bb, obj) { + return (obj || new tflite_metadata_schema.FeatureProperties).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.FeatureProperties=} obj + * @returns {tflite_metadata_schema.FeatureProperties} + */ +tflite_metadata_schema.FeatureProperties.getSizePrefixedRootAsFeatureProperties = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.FeatureProperties).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.FeatureProperties.startFeatureProperties = function(builder) { + builder.startObject(0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.FeatureProperties.endFeatureProperties = function(builder) { + var offset = builder.endObject(); + 
return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.FeatureProperties.createFeatureProperties = function(builder) { + tflite_metadata_schema.FeatureProperties.startFeatureProperties(builder); + return tflite_metadata_schema.FeatureProperties.endFeatureProperties(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.ImageSize = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.ImageSize} + */ +tflite_metadata_schema.ImageSize.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ImageSize=} obj + * @returns {tflite_metadata_schema.ImageSize} + */ +tflite_metadata_schema.ImageSize.getRootAsImageSize = function(bb, obj) { + return (obj || new tflite_metadata_schema.ImageSize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ImageSize=} obj + * @returns {tflite_metadata_schema.ImageSize} + */ +tflite_metadata_schema.ImageSize.getSizePrefixedRootAsImageSize = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.ImageSize).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.ImageSize.prototype.width = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.ImageSize.prototype.height = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readUint32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.ImageSize.startImageSize = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} width + */ +tflite_metadata_schema.ImageSize.addWidth = function(builder, width) { + builder.addFieldInt32(0, width, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} height + */ +tflite_metadata_schema.ImageSize.addHeight = function(builder, height) { + builder.addFieldInt32(1, height, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ImageSize.endImageSize = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} width + * @param {number} height + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ImageSize.createImageSize = function(builder, width, height) { + tflite_metadata_schema.ImageSize.startImageSize(builder); + tflite_metadata_schema.ImageSize.addWidth(builder, width); + tflite_metadata_schema.ImageSize.addHeight(builder, height); + return tflite_metadata_schema.ImageSize.endImageSize(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.ImageProperties = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.ImageProperties} + */ +tflite_metadata_schema.ImageProperties.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ImageProperties=} obj + * @returns {tflite_metadata_schema.ImageProperties} + */ +tflite_metadata_schema.ImageProperties.getRootAsImageProperties = function(bb, obj) { + 
return (obj || new tflite_metadata_schema.ImageProperties).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ImageProperties=} obj + * @returns {tflite_metadata_schema.ImageProperties} + */ +tflite_metadata_schema.ImageProperties.getSizePrefixedRootAsImageProperties = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.ImageProperties).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_metadata_schema.ColorSpaceType} + */ +tflite_metadata_schema.ImageProperties.prototype.colorSpace = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_metadata_schema.ColorSpaceType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_metadata_schema.ColorSpaceType.UNKNOWN; +}; + +/** + * @param {tflite_metadata_schema.ImageSize=} obj + * @returns {tflite_metadata_schema.ImageSize|null} + */ +tflite_metadata_schema.ImageProperties.prototype.defaultSize = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
(obj || new tflite_metadata_schema.ImageSize).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.ImageProperties.startImageProperties = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ColorSpaceType} colorSpace + */ +tflite_metadata_schema.ImageProperties.addColorSpace = function(builder, colorSpace) { + builder.addFieldInt8(0, colorSpace, tflite_metadata_schema.ColorSpaceType.UNKNOWN); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} defaultSizeOffset + */ +tflite_metadata_schema.ImageProperties.addDefaultSize = function(builder, defaultSizeOffset) { + builder.addFieldOffset(1, defaultSizeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ImageProperties.endImageProperties = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ColorSpaceType} colorSpace + * @param {flatbuffers.Offset} defaultSizeOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ImageProperties.createImageProperties = function(builder, colorSpace, defaultSizeOffset) { + tflite_metadata_schema.ImageProperties.startImageProperties(builder); + tflite_metadata_schema.ImageProperties.addColorSpace(builder, colorSpace); + tflite_metadata_schema.ImageProperties.addDefaultSize(builder, defaultSizeOffset); + return tflite_metadata_schema.ImageProperties.endImageProperties(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.BoundingBoxProperties = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns 
{tflite_metadata_schema.BoundingBoxProperties} + */ +tflite_metadata_schema.BoundingBoxProperties.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.BoundingBoxProperties=} obj + * @returns {tflite_metadata_schema.BoundingBoxProperties} + */ +tflite_metadata_schema.BoundingBoxProperties.getRootAsBoundingBoxProperties = function(bb, obj) { + return (obj || new tflite_metadata_schema.BoundingBoxProperties).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.BoundingBoxProperties=} obj + * @returns {tflite_metadata_schema.BoundingBoxProperties} + */ +tflite_metadata_schema.BoundingBoxProperties.getSizePrefixedRootAsBoundingBoxProperties = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.BoundingBoxProperties).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_metadata_schema.BoundingBoxProperties.prototype.index = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readUint32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.BoundingBoxProperties.prototype.indexLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Uint32Array} + */ +tflite_metadata_schema.BoundingBoxProperties.prototype.indexArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @returns {tflite_metadata_schema.BoundingBoxType} + */ +tflite_metadata_schema.BoundingBoxProperties.prototype.type = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? /** @type {tflite_metadata_schema.BoundingBoxType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_metadata_schema.BoundingBoxType.UNKNOWN; +}; + +/** + * @returns {tflite_metadata_schema.CoordinateType} + */ +tflite_metadata_schema.BoundingBoxProperties.prototype.coordinateType = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? /** @type {tflite_metadata_schema.CoordinateType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_metadata_schema.CoordinateType.RATIO; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.BoundingBoxProperties.startBoundingBoxProperties = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} indexOffset + */ +tflite_metadata_schema.BoundingBoxProperties.addIndex = function(builder, indexOffset) { + builder.addFieldOffset(0, indexOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.BoundingBoxProperties.createIndexVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addInt32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.BoundingBoxProperties.startIndexVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.BoundingBoxType} type + */ 
+tflite_metadata_schema.BoundingBoxProperties.addType = function(builder, type) { + builder.addFieldInt8(1, type, tflite_metadata_schema.BoundingBoxType.UNKNOWN); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.CoordinateType} coordinateType + */ +tflite_metadata_schema.BoundingBoxProperties.addCoordinateType = function(builder, coordinateType) { + builder.addFieldInt8(2, coordinateType, tflite_metadata_schema.CoordinateType.RATIO); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.BoundingBoxProperties.endBoundingBoxProperties = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} indexOffset + * @param {tflite_metadata_schema.BoundingBoxType} type + * @param {tflite_metadata_schema.CoordinateType} coordinateType + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.BoundingBoxProperties.createBoundingBoxProperties = function(builder, indexOffset, type, coordinateType) { + tflite_metadata_schema.BoundingBoxProperties.startBoundingBoxProperties(builder); + tflite_metadata_schema.BoundingBoxProperties.addIndex(builder, indexOffset); + tflite_metadata_schema.BoundingBoxProperties.addType(builder, type); + tflite_metadata_schema.BoundingBoxProperties.addCoordinateType(builder, coordinateType); + return tflite_metadata_schema.BoundingBoxProperties.endBoundingBoxProperties(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.ValueRange = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.ValueRange} + */ +tflite_metadata_schema.ValueRange.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ValueRange=} obj + * @returns {tflite_metadata_schema.ValueRange} + */ +tflite_metadata_schema.ValueRange.getRootAsValueRange = function(bb, obj) { + return (obj || new tflite_metadata_schema.ValueRange).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ValueRange=} obj + * @returns {tflite_metadata_schema.ValueRange} + */ +tflite_metadata_schema.ValueRange.getSizePrefixedRootAsValueRange = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.ValueRange).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.ValueRange.prototype.min = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.ValueRange.prototype.max = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.readInt32(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.ValueRange.startValueRange = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} min + */ +tflite_metadata_schema.ValueRange.addMin = function(builder, min) { + builder.addFieldInt32(0, min, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} max + */ +tflite_metadata_schema.ValueRange.addMax = function(builder, max) { + builder.addFieldInt32(1, max, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ValueRange.endValueRange = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} min + * @param {number} max + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ValueRange.createValueRange = function(builder, min, max) { + tflite_metadata_schema.ValueRange.startValueRange(builder); + tflite_metadata_schema.ValueRange.addMin(builder, min); + tflite_metadata_schema.ValueRange.addMax(builder, max); + return tflite_metadata_schema.ValueRange.endValueRange(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.Content = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.Content} + */ +tflite_metadata_schema.Content.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.Content=} obj + * @returns {tflite_metadata_schema.Content} + */ +tflite_metadata_schema.Content.getRootAsContent = function(bb, obj) { + return (obj || new 
tflite_metadata_schema.Content).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.Content=} obj + * @returns {tflite_metadata_schema.Content} + */ +tflite_metadata_schema.Content.getSizePrefixedRootAsContent = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.Content).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_metadata_schema.ContentProperties} + */ +tflite_metadata_schema.Content.prototype.contentPropertiesType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_metadata_schema.ContentProperties} */ (this.bb.readUint8(this.bb_pos + offset)) : tflite_metadata_schema.ContentProperties.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +tflite_metadata_schema.Content.prototype.contentProperties = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @param {tflite_metadata_schema.ValueRange=} obj + * @returns {tflite_metadata_schema.ValueRange|null} + */ +tflite_metadata_schema.Content.prototype.range = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
(obj || new tflite_metadata_schema.ValueRange).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.Content.startContent = function(builder) { + builder.startObject(3); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ContentProperties} contentPropertiesType + */ +tflite_metadata_schema.Content.addContentPropertiesType = function(builder, contentPropertiesType) { + builder.addFieldInt8(0, contentPropertiesType, tflite_metadata_schema.ContentProperties.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} contentPropertiesOffset + */ +tflite_metadata_schema.Content.addContentProperties = function(builder, contentPropertiesOffset) { + builder.addFieldOffset(1, contentPropertiesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} rangeOffset + */ +tflite_metadata_schema.Content.addRange = function(builder, rangeOffset) { + builder.addFieldOffset(2, rangeOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.Content.endContent = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ContentProperties} contentPropertiesType + * @param {flatbuffers.Offset} contentPropertiesOffset + * @param {flatbuffers.Offset} rangeOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.Content.createContent = function(builder, contentPropertiesType, contentPropertiesOffset, rangeOffset) { + tflite_metadata_schema.Content.startContent(builder); + tflite_metadata_schema.Content.addContentPropertiesType(builder, contentPropertiesType); + tflite_metadata_schema.Content.addContentProperties(builder, contentPropertiesOffset); + tflite_metadata_schema.Content.addRange(builder, rangeOffset); + 
return tflite_metadata_schema.Content.endContent(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.NormalizationOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.NormalizationOptions} + */ +tflite_metadata_schema.NormalizationOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.NormalizationOptions=} obj + * @returns {tflite_metadata_schema.NormalizationOptions} + */ +tflite_metadata_schema.NormalizationOptions.getRootAsNormalizationOptions = function(bb, obj) { + return (obj || new tflite_metadata_schema.NormalizationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.NormalizationOptions=} obj + * @returns {tflite_metadata_schema.NormalizationOptions} + */ +tflite_metadata_schema.NormalizationOptions.getSizePrefixedRootAsNormalizationOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.NormalizationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_metadata_schema.NormalizationOptions.prototype.mean = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.NormalizationOptions.prototype.meanLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +tflite_metadata_schema.NormalizationOptions.prototype.meanArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_metadata_schema.NormalizationOptions.prototype.std = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.NormalizationOptions.prototype.stdLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +tflite_metadata_schema.NormalizationOptions.prototype.stdArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.NormalizationOptions.startNormalizationOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} meanOffset + */ +tflite_metadata_schema.NormalizationOptions.addMean = function(builder, meanOffset) { + builder.addFieldOffset(0, meanOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.NormalizationOptions.createMeanVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.NormalizationOptions.startMeanVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} stdOffset + */ +tflite_metadata_schema.NormalizationOptions.addStd = function(builder, stdOffset) { + builder.addFieldOffset(1, stdOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.NormalizationOptions.createStdVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.NormalizationOptions.startStdVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} 
builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.NormalizationOptions.endNormalizationOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} meanOffset + * @param {flatbuffers.Offset} stdOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.NormalizationOptions.createNormalizationOptions = function(builder, meanOffset, stdOffset) { + tflite_metadata_schema.NormalizationOptions.startNormalizationOptions(builder); + tflite_metadata_schema.NormalizationOptions.addMean(builder, meanOffset); + tflite_metadata_schema.NormalizationOptions.addStd(builder, stdOffset); + return tflite_metadata_schema.NormalizationOptions.endNormalizationOptions(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.ScoreCalibrationOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.ScoreCalibrationOptions} + */ +tflite_metadata_schema.ScoreCalibrationOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ScoreCalibrationOptions=} obj + * @returns {tflite_metadata_schema.ScoreCalibrationOptions} + */ +tflite_metadata_schema.ScoreCalibrationOptions.getRootAsScoreCalibrationOptions = function(bb, obj) { + return (obj || new tflite_metadata_schema.ScoreCalibrationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ScoreCalibrationOptions=} obj + * @returns {tflite_metadata_schema.ScoreCalibrationOptions} + */ +tflite_metadata_schema.ScoreCalibrationOptions.getSizePrefixedRootAsScoreCalibrationOptions = function(bb, obj) { + 
bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.ScoreCalibrationOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_metadata_schema.ScoreTransformationType} + */ +tflite_metadata_schema.ScoreCalibrationOptions.prototype.scoreTransformation = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_metadata_schema.ScoreTransformationType} */ (this.bb.readInt8(this.bb_pos + offset)) : tflite_metadata_schema.ScoreTransformationType.IDENTITY; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.ScoreCalibrationOptions.prototype.defaultScore = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.ScoreCalibrationOptions.startScoreCalibrationOptions = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ScoreTransformationType} scoreTransformation + */ +tflite_metadata_schema.ScoreCalibrationOptions.addScoreTransformation = function(builder, scoreTransformation) { + builder.addFieldInt8(0, scoreTransformation, tflite_metadata_schema.ScoreTransformationType.IDENTITY); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} defaultScore + */ +tflite_metadata_schema.ScoreCalibrationOptions.addDefaultScore = function(builder, defaultScore) { + builder.addFieldFloat32(1, defaultScore, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ScoreCalibrationOptions.endScoreCalibrationOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ScoreTransformationType} scoreTransformation + * @param {number} 
defaultScore + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ScoreCalibrationOptions.createScoreCalibrationOptions = function(builder, scoreTransformation, defaultScore) { + tflite_metadata_schema.ScoreCalibrationOptions.startScoreCalibrationOptions(builder); + tflite_metadata_schema.ScoreCalibrationOptions.addScoreTransformation(builder, scoreTransformation); + tflite_metadata_schema.ScoreCalibrationOptions.addDefaultScore(builder, defaultScore); + return tflite_metadata_schema.ScoreCalibrationOptions.endScoreCalibrationOptions(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.ScoreThresholdingOptions = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.ScoreThresholdingOptions} + */ +tflite_metadata_schema.ScoreThresholdingOptions.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ScoreThresholdingOptions=} obj + * @returns {tflite_metadata_schema.ScoreThresholdingOptions} + */ +tflite_metadata_schema.ScoreThresholdingOptions.getRootAsScoreThresholdingOptions = function(bb, obj) { + return (obj || new tflite_metadata_schema.ScoreThresholdingOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ScoreThresholdingOptions=} obj + * @returns {tflite_metadata_schema.ScoreThresholdingOptions} + */ +tflite_metadata_schema.ScoreThresholdingOptions.getSizePrefixedRootAsScoreThresholdingOptions = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.ScoreThresholdingOptions).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {number} + */ 
+tflite_metadata_schema.ScoreThresholdingOptions.prototype.globalScoreThreshold = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb_pos + offset) : 0.0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.ScoreThresholdingOptions.startScoreThresholdingOptions = function(builder) { + builder.startObject(1); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} globalScoreThreshold + */ +tflite_metadata_schema.ScoreThresholdingOptions.addGlobalScoreThreshold = function(builder, globalScoreThreshold) { + builder.addFieldFloat32(0, globalScoreThreshold, 0.0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ScoreThresholdingOptions.endScoreThresholdingOptions = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} globalScoreThreshold + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ScoreThresholdingOptions.createScoreThresholdingOptions = function(builder, globalScoreThreshold) { + tflite_metadata_schema.ScoreThresholdingOptions.startScoreThresholdingOptions(builder); + tflite_metadata_schema.ScoreThresholdingOptions.addGlobalScoreThreshold(builder, globalScoreThreshold); + return tflite_metadata_schema.ScoreThresholdingOptions.endScoreThresholdingOptions(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.ProcessUnit = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.ProcessUnit} + */ +tflite_metadata_schema.ProcessUnit.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param 
{tflite_metadata_schema.ProcessUnit=} obj + * @returns {tflite_metadata_schema.ProcessUnit} + */ +tflite_metadata_schema.ProcessUnit.getRootAsProcessUnit = function(bb, obj) { + return (obj || new tflite_metadata_schema.ProcessUnit).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ProcessUnit=} obj + * @returns {tflite_metadata_schema.ProcessUnit} + */ +tflite_metadata_schema.ProcessUnit.getSizePrefixedRootAsProcessUnit = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.ProcessUnit).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @returns {tflite_metadata_schema.ProcessUnitOptions} + */ +tflite_metadata_schema.ProcessUnit.prototype.optionsType = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? /** @type {tflite_metadata_schema.ProcessUnitOptions} */ (this.bb.readUint8(this.bb_pos + offset)) : tflite_metadata_schema.ProcessUnitOptions.NONE; +}; + +/** + * @param {flatbuffers.Table} obj + * @returns {?flatbuffers.Table} + */ +tflite_metadata_schema.ProcessUnit.prototype.options = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__union(obj, this.bb_pos + offset) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.ProcessUnit.startProcessUnit = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ProcessUnitOptions} optionsType + */ +tflite_metadata_schema.ProcessUnit.addOptionsType = function(builder, optionsType) { + builder.addFieldInt8(0, optionsType, tflite_metadata_schema.ProcessUnitOptions.NONE); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} optionsOffset + */ +tflite_metadata_schema.ProcessUnit.addOptions = function(builder, optionsOffset) { + builder.addFieldOffset(1, optionsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ProcessUnit.endProcessUnit = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {tflite_metadata_schema.ProcessUnitOptions} optionsType + * @param {flatbuffers.Offset} optionsOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ProcessUnit.createProcessUnit = function(builder, optionsType, optionsOffset) { + tflite_metadata_schema.ProcessUnit.startProcessUnit(builder); + tflite_metadata_schema.ProcessUnit.addOptionsType(builder, optionsType); + tflite_metadata_schema.ProcessUnit.addOptions(builder, optionsOffset); + return tflite_metadata_schema.ProcessUnit.endProcessUnit(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.Stats = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.Stats} + */ +tflite_metadata_schema.Stats.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param 
{flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.Stats=} obj + * @returns {tflite_metadata_schema.Stats} + */ +tflite_metadata_schema.Stats.getRootAsStats = function(bb, obj) { + return (obj || new tflite_metadata_schema.Stats).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.Stats=} obj + * @returns {tflite_metadata_schema.Stats} + */ +tflite_metadata_schema.Stats.getSizePrefixedRootAsStats = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.Stats).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_metadata_schema.Stats.prototype.max = function(index) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.Stats.prototype.maxLength = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +tflite_metadata_schema.Stats.prototype.maxArray = function() { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {number} index + * @returns {number} + */ +tflite_metadata_schema.Stats.prototype.min = function(index) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + offset) + index * 4) : 0; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.Stats.prototype.minLength = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @returns {Float32Array} + */ +tflite_metadata_schema.Stats.prototype.minArray = function() { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + offset), this.bb.__vector_len(this.bb_pos + offset)) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.Stats.startStats = function(builder) { + builder.startObject(2); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} maxOffset + */ +tflite_metadata_schema.Stats.addMax = function(builder, maxOffset) { + builder.addFieldOffset(0, maxOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.Stats.createMaxVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.Stats.startMaxVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} minOffset + */ +tflite_metadata_schema.Stats.addMin = function(builder, minOffset) { + builder.addFieldOffset(1, minOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.Stats.createMinVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addFloat32(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.Stats.startMinVector = function(builder, 
numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.Stats.endStats = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} maxOffset + * @param {flatbuffers.Offset} minOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.Stats.createStats = function(builder, maxOffset, minOffset) { + tflite_metadata_schema.Stats.startStats(builder); + tflite_metadata_schema.Stats.addMax(builder, maxOffset); + tflite_metadata_schema.Stats.addMin(builder, minOffset); + return tflite_metadata_schema.Stats.endStats(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.TensorMetadata = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.TensorMetadata} + */ +tflite_metadata_schema.TensorMetadata.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.TensorMetadata=} obj + * @returns {tflite_metadata_schema.TensorMetadata} + */ +tflite_metadata_schema.TensorMetadata.getRootAsTensorMetadata = function(bb, obj) { + return (obj || new tflite_metadata_schema.TensorMetadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.TensorMetadata=} obj + * @returns {tflite_metadata_schema.TensorMetadata} + */ +tflite_metadata_schema.TensorMetadata.getSizePrefixedRootAsTensorMetadata = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.TensorMetadata).__init(bb.readInt32(bb.position()) + 
bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.TensorMetadata.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.TensorMetadata.prototype.description = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array} + */ +tflite_metadata_schema.TensorMetadata.prototype.dimensionNames = function(index, optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__string(this.bb.__vector(this.bb_pos + offset) + index * 4, optionalEncoding) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.TensorMetadata.prototype.dimensionNamesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {tflite_metadata_schema.Content=} obj + * @returns {tflite_metadata_schema.Content|null} + */ +tflite_metadata_schema.TensorMetadata.prototype.content = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new tflite_metadata_schema.Content).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @param {tflite_metadata_schema.ProcessUnit=} obj + * @returns {tflite_metadata_schema.ProcessUnit} + */ +tflite_metadata_schema.TensorMetadata.prototype.processUnits = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
(obj || new tflite_metadata_schema.ProcessUnit).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.TensorMetadata.prototype.processUnitsLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {tflite_metadata_schema.Stats=} obj + * @returns {tflite_metadata_schema.Stats|null} + */ +tflite_metadata_schema.TensorMetadata.prototype.stats = function(obj) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? (obj || new tflite_metadata_schema.Stats).__init(this.bb.__indirect(this.bb_pos + offset), this.bb) : null; +}; + +/** + * @param {number} index + * @param {tflite_metadata_schema.AssociatedFile=} obj + * @returns {tflite_metadata_schema.AssociatedFile} + */ +tflite_metadata_schema.TensorMetadata.prototype.associatedFiles = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new tflite_metadata_schema.AssociatedFile).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.TensorMetadata.prototype.associatedFilesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.TensorMetadata.startTensorMetadata = function(builder) { + builder.startObject(7); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +tflite_metadata_schema.TensorMetadata.addName = function(builder, nameOffset) { + builder.addFieldOffset(0, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptionOffset + */ +tflite_metadata_schema.TensorMetadata.addDescription = function(builder, descriptionOffset) { + builder.addFieldOffset(1, descriptionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} dimensionNamesOffset + */ +tflite_metadata_schema.TensorMetadata.addDimensionNames = function(builder, dimensionNamesOffset) { + builder.addFieldOffset(2, dimensionNamesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.TensorMetadata.createDimensionNamesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.TensorMetadata.startDimensionNamesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} contentOffset + */ +tflite_metadata_schema.TensorMetadata.addContent = function(builder, contentOffset) { + builder.addFieldOffset(3, contentOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} processUnitsOffset + */ +tflite_metadata_schema.TensorMetadata.addProcessUnits = function(builder, processUnitsOffset) { + 
builder.addFieldOffset(4, processUnitsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.TensorMetadata.createProcessUnitsVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.TensorMetadata.startProcessUnitsVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} statsOffset + */ +tflite_metadata_schema.TensorMetadata.addStats = function(builder, statsOffset) { + builder.addFieldOffset(5, statsOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} associatedFilesOffset + */ +tflite_metadata_schema.TensorMetadata.addAssociatedFiles = function(builder, associatedFilesOffset) { + builder.addFieldOffset(6, associatedFilesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.TensorMetadata.createAssociatedFilesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.TensorMetadata.startAssociatedFilesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.TensorMetadata.endTensorMetadata = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * 
@param {flatbuffers.Offset} nameOffset + * @param {flatbuffers.Offset} descriptionOffset + * @param {flatbuffers.Offset} dimensionNamesOffset + * @param {flatbuffers.Offset} contentOffset + * @param {flatbuffers.Offset} processUnitsOffset + * @param {flatbuffers.Offset} statsOffset + * @param {flatbuffers.Offset} associatedFilesOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.TensorMetadata.createTensorMetadata = function(builder, nameOffset, descriptionOffset, dimensionNamesOffset, contentOffset, processUnitsOffset, statsOffset, associatedFilesOffset) { + tflite_metadata_schema.TensorMetadata.startTensorMetadata(builder); + tflite_metadata_schema.TensorMetadata.addName(builder, nameOffset); + tflite_metadata_schema.TensorMetadata.addDescription(builder, descriptionOffset); + tflite_metadata_schema.TensorMetadata.addDimensionNames(builder, dimensionNamesOffset); + tflite_metadata_schema.TensorMetadata.addContent(builder, contentOffset); + tflite_metadata_schema.TensorMetadata.addProcessUnits(builder, processUnitsOffset); + tflite_metadata_schema.TensorMetadata.addStats(builder, statsOffset); + tflite_metadata_schema.TensorMetadata.addAssociatedFiles(builder, associatedFilesOffset); + return tflite_metadata_schema.TensorMetadata.endTensorMetadata(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.SubGraphMetadata = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.SubGraphMetadata} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.SubGraphMetadata=} obj + * @returns {tflite_metadata_schema.SubGraphMetadata} + */ +tflite_metadata_schema.SubGraphMetadata.getRootAsSubGraphMetadata = 
function(bb, obj) { + return (obj || new tflite_metadata_schema.SubGraphMetadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.SubGraphMetadata=} obj + * @returns {tflite_metadata_schema.SubGraphMetadata} + */ +tflite_metadata_schema.SubGraphMetadata.getSizePrefixedRootAsSubGraphMetadata = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.SubGraphMetadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.description = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @param {tflite_metadata_schema.TensorMetadata=} obj + * @returns {tflite_metadata_schema.TensorMetadata} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.inputTensorMetadata = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? (obj || new tflite_metadata_schema.TensorMetadata).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.inputTensorMetadataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {tflite_metadata_schema.TensorMetadata=} obj + * @returns {tflite_metadata_schema.TensorMetadata} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.outputTensorMetadata = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new tflite_metadata_schema.TensorMetadata).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.outputTensorMetadataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {number} index + * @param {tflite_metadata_schema.AssociatedFile=} obj + * @returns {tflite_metadata_schema.AssociatedFile} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.associatedFiles = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? (obj || new tflite_metadata_schema.AssociatedFile).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.SubGraphMetadata.prototype.associatedFilesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.SubGraphMetadata.startSubGraphMetadata = function(builder) { + builder.startObject(5); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +tflite_metadata_schema.SubGraphMetadata.addName = function(builder, nameOffset) { + builder.addFieldOffset(0, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptionOffset + */ +tflite_metadata_schema.SubGraphMetadata.addDescription = function(builder, descriptionOffset) { + builder.addFieldOffset(1, descriptionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} inputTensorMetadataOffset + */ +tflite_metadata_schema.SubGraphMetadata.addInputTensorMetadata = function(builder, inputTensorMetadataOffset) { + builder.addFieldOffset(2, inputTensorMetadataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.SubGraphMetadata.createInputTensorMetadataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.SubGraphMetadata.startInputTensorMetadataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} outputTensorMetadataOffset + */ +tflite_metadata_schema.SubGraphMetadata.addOutputTensorMetadata = function(builder, outputTensorMetadataOffset) { + builder.addFieldOffset(3, outputTensorMetadataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ 
+tflite_metadata_schema.SubGraphMetadata.createOutputTensorMetadataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.SubGraphMetadata.startOutputTensorMetadataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} associatedFilesOffset + */ +tflite_metadata_schema.SubGraphMetadata.addAssociatedFiles = function(builder, associatedFilesOffset) { + builder.addFieldOffset(4, associatedFilesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.SubGraphMetadata.createAssociatedFilesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.SubGraphMetadata.startAssociatedFilesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.SubGraphMetadata.endSubGraphMetadata = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + * @param {flatbuffers.Offset} descriptionOffset + * @param {flatbuffers.Offset} inputTensorMetadataOffset + * @param {flatbuffers.Offset} outputTensorMetadataOffset + * @param {flatbuffers.Offset} associatedFilesOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.SubGraphMetadata.createSubGraphMetadata = 
function(builder, nameOffset, descriptionOffset, inputTensorMetadataOffset, outputTensorMetadataOffset, associatedFilesOffset) { + tflite_metadata_schema.SubGraphMetadata.startSubGraphMetadata(builder); + tflite_metadata_schema.SubGraphMetadata.addName(builder, nameOffset); + tflite_metadata_schema.SubGraphMetadata.addDescription(builder, descriptionOffset); + tflite_metadata_schema.SubGraphMetadata.addInputTensorMetadata(builder, inputTensorMetadataOffset); + tflite_metadata_schema.SubGraphMetadata.addOutputTensorMetadata(builder, outputTensorMetadataOffset); + tflite_metadata_schema.SubGraphMetadata.addAssociatedFiles(builder, associatedFilesOffset); + return tflite_metadata_schema.SubGraphMetadata.endSubGraphMetadata(builder); +} + +/** + * @constructor + */ +tflite_metadata_schema.ModelMetadata = function() { + /** + * @type {flatbuffers.ByteBuffer} + */ + this.bb = null; + + /** + * @type {number} + */ + this.bb_pos = 0; +}; + +/** + * @param {number} i + * @param {flatbuffers.ByteBuffer} bb + * @returns {tflite_metadata_schema.ModelMetadata} + */ +tflite_metadata_schema.ModelMetadata.prototype.__init = function(i, bb) { + this.bb_pos = i; + this.bb = bb; + return this; +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ModelMetadata=} obj + * @returns {tflite_metadata_schema.ModelMetadata} + */ +tflite_metadata_schema.ModelMetadata.getRootAsModelMetadata = function(bb, obj) { + return (obj || new tflite_metadata_schema.ModelMetadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + +/** + * @param {flatbuffers.ByteBuffer} bb + * @param {tflite_metadata_schema.ModelMetadata=} obj + * @returns {tflite_metadata_schema.ModelMetadata} + */ +tflite_metadata_schema.ModelMetadata.getSizePrefixedRootAsModelMetadata = function(bb, obj) { + bb.setPosition(bb.position() + flatbuffers.SIZE_PREFIX_LENGTH); + return (obj || new tflite_metadata_schema.ModelMetadata).__init(bb.readInt32(bb.position()) + bb.position(), bb); +}; + 
+/** + * @param {flatbuffers.ByteBuffer} bb + * @returns {boolean} + */ +tflite_metadata_schema.ModelMetadata.bufferHasIdentifier = function(bb) { + return bb.__has_identifier('M001'); +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.ModelMetadata.prototype.name = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 4); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.ModelMetadata.prototype.description = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 6); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.ModelMetadata.prototype.version = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 8); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @param {tflite_metadata_schema.SubGraphMetadata=} obj + * @returns {tflite_metadata_schema.SubGraphMetadata} + */ +tflite_metadata_schema.ModelMetadata.prototype.subgraphMetadata = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? (obj || new tflite_metadata_schema.SubGraphMetadata).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.ModelMetadata.prototype.subgraphMetadataLength = function() { + var offset = this.bb.__offset(this.bb_pos, 10); + return offset ? 
this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.ModelMetadata.prototype.author = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 12); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.ModelMetadata.prototype.license = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 14); + return offset ? this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {number} index + * @param {tflite_metadata_schema.AssociatedFile=} obj + * @returns {tflite_metadata_schema.AssociatedFile} + */ +tflite_metadata_schema.ModelMetadata.prototype.associatedFiles = function(index, obj) { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? (obj || new tflite_metadata_schema.AssociatedFile).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + offset) + index * 4), this.bb) : null; +}; + +/** + * @returns {number} + */ +tflite_metadata_schema.ModelMetadata.prototype.associatedFilesLength = function() { + var offset = this.bb.__offset(this.bb_pos, 16); + return offset ? this.bb.__vector_len(this.bb_pos + offset) : 0; +}; + +/** + * @param {flatbuffers.Encoding=} optionalEncoding + * @returns {string|Uint8Array|null} + */ +tflite_metadata_schema.ModelMetadata.prototype.minParserVersion = function(optionalEncoding) { + var offset = this.bb.__offset(this.bb_pos, 18); + return offset ? 
this.bb.__string(this.bb_pos + offset, optionalEncoding) : null; +}; + +/** + * @param {flatbuffers.Builder} builder + */ +tflite_metadata_schema.ModelMetadata.startModelMetadata = function(builder) { + builder.startObject(8); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + */ +tflite_metadata_schema.ModelMetadata.addName = function(builder, nameOffset) { + builder.addFieldOffset(0, nameOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} descriptionOffset + */ +tflite_metadata_schema.ModelMetadata.addDescription = function(builder, descriptionOffset) { + builder.addFieldOffset(1, descriptionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} versionOffset + */ +tflite_metadata_schema.ModelMetadata.addVersion = function(builder, versionOffset) { + builder.addFieldOffset(2, versionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} subgraphMetadataOffset + */ +tflite_metadata_schema.ModelMetadata.addSubgraphMetadata = function(builder, subgraphMetadataOffset) { + builder.addFieldOffset(3, subgraphMetadataOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ModelMetadata.createSubgraphMetadataVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.ModelMetadata.startSubgraphMetadataVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} authorOffset + */ +tflite_metadata_schema.ModelMetadata.addAuthor = function(builder, authorOffset) { + 
builder.addFieldOffset(4, authorOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} licenseOffset + */ +tflite_metadata_schema.ModelMetadata.addLicense = function(builder, licenseOffset) { + builder.addFieldOffset(5, licenseOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} associatedFilesOffset + */ +tflite_metadata_schema.ModelMetadata.addAssociatedFiles = function(builder, associatedFilesOffset) { + builder.addFieldOffset(6, associatedFilesOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {Array.} data + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ModelMetadata.createAssociatedFilesVector = function(builder, data) { + builder.startVector(4, data.length, 4); + for (var i = data.length - 1; i >= 0; i--) { + builder.addOffset(data[i]); + } + return builder.endVector(); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {number} numElems + */ +tflite_metadata_schema.ModelMetadata.startAssociatedFilesVector = function(builder, numElems) { + builder.startVector(4, numElems, 4); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} minParserVersionOffset + */ +tflite_metadata_schema.ModelMetadata.addMinParserVersion = function(builder, minParserVersionOffset) { + builder.addFieldOffset(7, minParserVersionOffset, 0); +}; + +/** + * @param {flatbuffers.Builder} builder + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ModelMetadata.endModelMetadata = function(builder) { + var offset = builder.endObject(); + return offset; +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ +tflite_metadata_schema.ModelMetadata.finishModelMetadataBuffer = function(builder, offset) { + builder.finish(offset, 'M001'); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} offset + */ 
+tflite_metadata_schema.ModelMetadata.finishSizePrefixedModelMetadataBuffer = function(builder, offset) { + builder.finish(offset, 'M001', true); +}; + +/** + * @param {flatbuffers.Builder} builder + * @param {flatbuffers.Offset} nameOffset + * @param {flatbuffers.Offset} descriptionOffset + * @param {flatbuffers.Offset} versionOffset + * @param {flatbuffers.Offset} subgraphMetadataOffset + * @param {flatbuffers.Offset} authorOffset + * @param {flatbuffers.Offset} licenseOffset + * @param {flatbuffers.Offset} associatedFilesOffset + * @param {flatbuffers.Offset} minParserVersionOffset + * @returns {flatbuffers.Offset} + */ +tflite_metadata_schema.ModelMetadata.createModelMetadata = function(builder, nameOffset, descriptionOffset, versionOffset, subgraphMetadataOffset, authorOffset, licenseOffset, associatedFilesOffset, minParserVersionOffset) { + tflite_metadata_schema.ModelMetadata.startModelMetadata(builder); + tflite_metadata_schema.ModelMetadata.addName(builder, nameOffset); + tflite_metadata_schema.ModelMetadata.addDescription(builder, descriptionOffset); + tflite_metadata_schema.ModelMetadata.addVersion(builder, versionOffset); + tflite_metadata_schema.ModelMetadata.addSubgraphMetadata(builder, subgraphMetadataOffset); + tflite_metadata_schema.ModelMetadata.addAuthor(builder, authorOffset); + tflite_metadata_schema.ModelMetadata.addLicense(builder, licenseOffset); + tflite_metadata_schema.ModelMetadata.addAssociatedFiles(builder, associatedFilesOffset); + tflite_metadata_schema.ModelMetadata.addMinParserVersion(builder, minParserVersionOffset); + return tflite_metadata_schema.ModelMetadata.endModelMetadata(builder); +} + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports = { tflite_schema: tflite_schema, tflite_metadata_schema: tflite_metadata_schema }; +} diff --git a/frontend/packages/core/public/netron/tflite.js b/frontend/packages/core/public/netron/tflite.js new file mode 100644 index 00000000..9828ebd7 --- 
/dev/null +++ b/frontend/packages/core/public/netron/tflite.js @@ -0,0 +1,1010 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var tflite = tflite || {}; +var base = base || require('./base'); +var flatbuffers = flatbuffers || require('flatbuffers').flatbuffers; +var long = long || { Long: require('long') }; + +tflite.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (['tflite', 'lite', 'tfl', 'bin', 'pb', 'model', 'tmfile', 'h5' ].indexOf(extension) !== -1) { + const buffer = context.buffer; + const signature = 'TFL3'; + if (buffer && buffer.length > 8 && buffer.subarray(4, 8).every((x, i) => x === signature.charCodeAt(i))) { + return true; + } + } + if (extension === 'json') { + const json = context.text; + if (json.indexOf("\"subgraphs\"", 0) !== -1 && json.indexOf("\"operator_codes\"", 0) !== -1) { + return true; + } + } + return false; + } + + open(context, host) { + return host.require('./tflite-schema').then((schema) => { + tflite.schema = schema.tflite_schema; + tflite.metadata_schema = schema.tflite_metadata_schema; + return tflite.Metadata.open(host).then((metadata) => { + const identifier = context.identifier; + try { + const extension = identifier.split('.').pop().toLowerCase(); + switch (extension) { + default: { + const buffer = new flatbuffers.ByteBuffer(context.buffer); + if (!tflite.schema.Model.bufferHasIdentifier(buffer)) { + throw new tflite.Error("File format is not tflite.Model."); + } + const model = tflite.schema.Model.getRootAsModel(buffer); + return new tflite.Model(metadata, null, model); + } + case 'json': { + const model = JSON.parse(context.text); + return new tflite.Model(metadata, 'json', model); + } + } + } + catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new tflite.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + }); + } +}; + +tflite.Model = class { + + constructor(metadata, format, model) { + this._graphs = []; + this._format = 'TensorFlow Lite'; + switch (format) { + default: { + this._format = this._format + ' v' + model.version().toString(); + this._description = model.description() || ''; + const operators = []; + const builtinOperatorMap = {}; + for (const key of Object.keys(tflite.schema.BuiltinOperator)) { + const index = tflite.schema.BuiltinOperator[key]; + builtinOperatorMap[index] = tflite.Utility.type(key); + } + for (let i = 0; i < model.operatorCodesLength(); i++) { + const operatorCode = model.operatorCodes(i); + const code = operatorCode.builtinCode(); + const version = operatorCode.version(); + const custom = code === tflite.schema.BuiltinOperator.CUSTOM; + const name = custom ? operatorCode.customCode() : builtinOperatorMap[code]; + if (!name) { + throw new tflite.Error("Invalid built-in code '" + code.toString() + "' at '" + i.toString() + "'."); + } + operators.push(custom ? { name: name, version: version, custom: true } : { name: name, version: version }); + } + /* + for (let i = 0; i < model.metadataBufferLength(); i++) { + const metadataBufferIndex = model.metadataBuffer(i); + const data = model.buffers(metadataBufferIndex).dataArray(); + // file_identifier "FDMD" + // file_identifier "MSMD" + // file_identifier "SEMD" + } + */ + let modelMetadata = null; + for (let i = 0; i < model.metadataLength(); i++) { + const metadata = model.metadata(i); + switch (metadata.name()) { + case 'min_runtime_version': { + const data = model.buffers(metadata.buffer()).dataArray(); + this._runtime = data ? 
new TextDecoder().decode(data) : undefined; + break; + } + case 'TFLITE_METADATA': { + const buffer = new flatbuffers.ByteBuffer(model.buffers(metadata.buffer()).dataArray() || []); + if (tflite.metadata_schema.ModelMetadata.bufferHasIdentifier(buffer)) { + modelMetadata = tflite.metadata_schema.ModelMetadata.getRootAsModelMetadata(buffer); + this._name = modelMetadata.name() || ''; + this._version = modelMetadata.version() || ''; + this._description = modelMetadata.description() ? [ this.description, modelMetadata.description()].join(' ') : this._description; + this._author = modelMetadata.author() || ''; + this._license = modelMetadata.license() || ''; + } + break; + } + } + } + const subgraphsLength = model.subgraphsLength(); + for (let i = 0; i < subgraphsLength; i++) { + const subgraph = model.subgraphs(i); + const name = subgraphsLength > 1 ? i.toString() : ''; + const subgraphMetadata = modelMetadata && i < modelMetadata.subgraphMetadataLength() ? modelMetadata.subgraphMetadata(i) : null; + this._graphs.push(new tflite.Graph(metadata, format, subgraph, subgraphMetadata, name, operators, model)); + } + break; + } + case 'json': { + this._format = this._format + (model.version ? ' v' + model.version.toString() : ''); + this._description = model.description || ''; + const operators = []; + if (model.operator_codes && Array.isArray(model.operator_codes)) { + for (let i = 0; i < model.operator_codes.length; i++) { + const operatorCode = model.operator_codes[i]; + const code = operatorCode.builtin_code; + const version = operatorCode.version || 1; + const custom = code === 'CUSTOM'; + const name = custom ? operatorCode.custom_code : tflite.Utility.type(code); + if (!name) { + throw new tflite.Error("Invalid built-in code '" + code.toString() + "' at '" + i.toString() + "'."); + } + operators.push(custom ? 
{ name: name, version: version, custom: true } : { name: name, version: version }); + } + } + if (model.subgraphs && Array.isArray(model.subgraphs)) { + const subgraphsLength = model.subgraphs.length; + for (let i = 0; i < subgraphsLength; i++) { + const subgraph = model.subgraphs[i]; + const name = subgraphsLength > 1 ? i.toString() : ''; + this._graphs.push(new tflite.Graph(metadata, format, subgraph, null, name, operators, model)); + } + } + break; + } + } + } + + get format() { + return this._format; + } + + get runtime() { + return this._runtime; + } + + get name() { + return this._name; + } + + get version() { + return this._version; + } + + get description() { + return this._description; + } + + get author() { + return this._author; + } + + get license() { + return this._license; + } + + get graphs() { + return this._graphs; + } +}; + +tflite.Graph = class { + + constructor(metadata, format, subgraph, subgraphMetadata, name, operators, model) { + this._nodes = []; + this._inputs = []; + this._outputs = []; + switch (format) { + default: { + this._name = subgraph.name() || name; + const args = []; + const tensorNames = []; + for (let i = 0; i < subgraph.tensorsLength(); i++) { + const tensor = subgraph.tensors(i); + const buffer = model.buffers(tensor.buffer()); + const is_variable = tensor.isVariable(); + const initializer = buffer.dataLength() > 0 || is_variable ? new tflite.Tensor(format, i, tensor, buffer, is_variable) : null; + args.push(new tflite.Argument(format, i, tensor, initializer)); + tensorNames.push(tensor.name()); + } + for (let i = 0; i < subgraph.operatorsLength(); i++) { + const node = subgraph.operators(i); + const index = node.opcodeIndex(); + const operator = index < operators.length ? 
operators[index] : { name: '(' + index.toString() + ')' }; + this._nodes.push(new tflite.Node(metadata, format, node, operator, i.toString(), args)); + } + const applyTensorMetadata = (argument, tensorMetadata) => { + if (tensorMetadata) { + const description = tensorMetadata.description(); + if (description) { + argument.description = description; + } + const content = tensorMetadata.content(); + if (argument.type && content) { + let denotation = null; + switch (content.contentPropertiesType()) { + case 1: { + denotation = 'Feature'; + break; + } + case 2: { + denotation = 'Image'; + const imageProperties = content.contentProperties(Reflect.construct(tflite.metadata_schema.ImageProperties, [])); + switch(imageProperties.colorSpace()) { + case 1: denotation += '(RGB)'; break; + case 2: denotation += '(Grayscale)'; break; + } + break; + } + case 3: { + denotation = 'BoundingBox'; + break; + } + } + if (denotation) { + argument.type.denotation = denotation; + } + } + } + }; + for (let i = 0; i < subgraph.inputsLength(); i++) { + const input = subgraph.inputs(i); + const argument = args[input]; + if (subgraphMetadata && i < subgraphMetadata.inputTensorMetadataLength()) { + applyTensorMetadata(argument, subgraphMetadata.inputTensorMetadata(i)); + } + this._inputs.push(new tflite.Parameter(tensorNames[input], true, [ argument ])); + } + for (let i = 0; i < subgraph.outputsLength(); i++) { + const output = subgraph.outputs(i); + const argument = args[output]; + if (subgraphMetadata && i < subgraphMetadata.outputTensorMetadataLength()) { + applyTensorMetadata(argument, subgraphMetadata.outputTensorMetadata(i)); + } + this._outputs.push(new tflite.Parameter(tensorNames[output], true, [ argument ])); + } + break; + } + case 'json': { + this._name = subgraph.name || ''; + const args = []; + const tensorNames = []; + if (subgraph.tensors && Array.isArray(subgraph.tensors)) { + for (let i = 0; i < subgraph.tensors.length; i++) { + const tensor = subgraph.tensors[i]; + const 
buffer = model.buffers[tensor.buffer]; + const is_variable = tensor.isVariable; + const initializer = buffer.data && buffer.data.length > 0 || is_variable ? new tflite.Tensor(format, i, tensor, buffer, is_variable) : null; + args.push(new tflite.Argument(format, i, tensor, initializer)); + tensorNames.push(tensor.name); + } + } + if (subgraph.operators && Array.isArray(subgraph.operators)) { + for (let i = 0; i < subgraph.operators.length; i++) { + const node = subgraph.operators[i]; + const index = node.opcode_index; + const operator = index < operators.length ? operators[index] : { name: '(' + index.toString() + ')' }; + this._nodes.push(new tflite.Node(metadata, format, node, operator, i.toString(), args)); + } + } + if (subgraph.inputs && Array.isArray(subgraph.inputs)) { + for (const input of subgraph.inputs) { + this._inputs.push(new tflite.Parameter(tensorNames[input], true, [ args[input] ])); + } + } + if (subgraph.outputs && Array.isArray(subgraph.outputs)) { + for (const output of subgraph.outputs) { + this._outputs.push(new tflite.Parameter(tensorNames[output], true, [ args[output] ])); + } + } + break; + } + } + } + + get name() { + return this._name; + } + + get groups() { + return false; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } +}; + +tflite.Node = class { + + constructor(metadata, format, node, type, location, args) { + this._metadata = metadata; + this._location = location; + this._type = type; + this._inputs = []; + this._outputs = []; + this._attributes = []; + if (node) { + let inputs = []; + let outputs = []; + switch (format) { + default: { + inputs = Array.from(node.inputsArray() || []); + outputs = Array.from(node.outputsArray() || []); + break; + } + case 'json': { + inputs = node.inputs && Array.isArray(node.inputs) ? node.inputs : []; + outputs = node.outputs && Array.isArray(node.outputs) ? 
node.outputs : []; + break; + } + } + const schema = this._metadata.type(this.type); + let inputIndex = 0; + while (inputIndex < inputs.length) { + let count = 1; + let inputName = null; + let inputVisible = true; + const inputArguments = []; + if (schema && schema.inputs && inputIndex < schema.inputs.length) { + const input = schema.inputs[inputIndex]; + inputName = input.name; + if (input.option == 'variadic') { + count = inputs.length - inputIndex; + } + if (Object.prototype.hasOwnProperty.call(input, 'visible') && !input.visible) { + inputVisible = false; + } + } + const inputArray = inputs.slice(inputIndex, inputIndex + count); + for (let j = 0; j < inputArray.length; j++) { + if (inputArray[j] != -1) { + inputArguments.push(args[inputArray[j]]); + } + } + inputIndex += count; + inputName = inputName ? inputName : inputIndex.toString(); + this._inputs.push(new tflite.Parameter(inputName, inputVisible, inputArguments)); + } + for (let k = 0; k < outputs.length; k++) { + const outputIndex = outputs[k]; + const argument = args[outputIndex]; + let outputName = k.toString(); + if (schema && schema.outputs && k < schema.outputs.length) { + const output = schema.outputs[k]; + if (output && (!output.option || output.opcodeIndex != 'variadic') && output.name) { + outputName = output.name; + } + } + this._outputs.push(new tflite.Parameter(outputName, true, [ argument ])); + } + switch (format) { + default: { + if (type.custom && node.customOptionsLength() > 0) { + const custom = Array.from(node.customOptionsArray() || []); + const schema = metadata.attribute(this.type, 'custom'); + this._attributes.push(new tflite.Attribute(schema, format, 'custom', custom)); + } + let optionsTypeName = this.type + 'Options'; + switch (this.type) { + case 'AveragePool2D': + case 'MaxPool2D': + optionsTypeName = 'Pool2DOptions'; + break; + case 'Mean': + case 'ReduceMax': + case 'ReduceMin': + case 'Sum': + optionsTypeName = 'ReducerOptions'; + break; + case 'Minimum': + case 'Maximum': 
+ optionsTypeName = 'MaximumMinimumOptions'; + break; + } + const optionsType = tflite.schema[optionsTypeName] || null; + if (typeof optionsType === 'function') { + const options = node.builtinOptions(Reflect.construct(optionsType, [])); + if (options) { + const names = new Set(Object.keys(Object.getPrototypeOf(options)).filter((name) => name !== '__init')); + const arrayNames = new Set(); + for (const name of new Set(names)) { + if (names.has(name + 'Array') && names.has(name + 'Length')) { + names.delete(name + 'Array'); + names.delete(name + 'Length'); + arrayNames.add(name); + } + } + for (const name of names) { + if (options[name] && typeof options[name] == 'function') { + const value = arrayNames.has(name) ? Array.from(options[name + 'Array']() || []) : options[name](); + if (name === 'fusedActivationFunction' && value !== 0) { + const activationFunctionMap = { 1: 'Relu', 2: 'ReluN1To1', 3: 'Relu6', 4: 'Tanh', 5: 'SignBit' }; + if (!activationFunctionMap[value]) { + throw new tflite.Error("Unknown activation funtion index '" + JSON.stringify(value) + "'."); + } + const type = activationFunctionMap[value]; + this._chain = [ new tflite.Node(metadata, format, null, { name: type }, null, []) ]; + } + const schema = metadata.attribute(this.type, 'custom'); + this._attributes.push(new tflite.Attribute(schema, format, name, value)); + } + } + } + } + break; + } + case 'json': { + if (node.builtin_options && !Array.isArray(node.builtin_options)) { + if (type.custom && Array.isArray(type.custom)) { + const schema = metadata.attribute(this.type, 'custom'); + this._attributes.push(new tflite.Attribute(schema, format, 'custom', type.custom)); + } + for (const name of Object.keys(node.builtin_options)) { + const value = node.builtin_options[name]; + if (name === 'fused_activation_function' && value !== 'NONE') { + const activationFunctionMap = { 'RELU': 'Relu', 'RELU_N1_TO_1': 'ReluN1To1', 'RELU6': 'Relu6', 'TANH': 'Tanh', 'SIGN_BIT': 'SignBit' }; + if 
(!activationFunctionMap[value]) { + throw new tflite.Error("Unknown activation funtion index '" + JSON.stringify(value) + "'."); + } + const type = activationFunctionMap[value]; + this._chain = [ new tflite.Node(metadata, format, null, { name: type }, null, []) ]; + } + const schema = metadata.attribute(this.type, name); + this._attributes.push(new tflite.Attribute(schema, format, name, value)); + } + } + break; + } + } + } + } + + get type() { + return this._type.name; + } + + get name() { + return ''; + } + + get location() { + return this._location; + } + + get domain() { + return null; + } + + get metadata() { + if (this._type.custom) { + return { name: this.type, category: 'custom' }; + } + return this._metadata.type(this.type); + } + + get group() { + return null; + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get chain() { + return this._chain; + } + + get attributes() { + return this._attributes; + } +}; + +tflite.Attribute = class { + + constructor(schema, format, name, value) { + this._type = null; + this._name = ''; + this._value = value; + const lower = name.toLowerCase(); + for (let i = 0; i < name.length; i++) { + this._name += (name[i] == lower[i]) ? 
name[i] : ('_' + lower[i]); + } + if (this._name == 'fused_activation_function') { + this._visible = false; + + } + if (schema) { + if (schema.type) { + this._type = schema.type; + } + if (this._type) { + switch (this._type) { + case 'shape': + this._value = new tflite.TensorShape(value); + break; + case 'TensorType': + this._value = tflite.Utility.dataType(format, this._value); + break; + default: + this._value = tflite.Utility.enum(this._type, this._value); + break; + } + } + if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) { + this._visible = false; + } + else if (Object.prototype.hasOwnProperty.call(schema, 'default')) { + value = this._value; + if (typeof value == 'function') { + value = value(); + } + if (value == schema.default) { + this._visible = false; + } + } + } + } + + get name() { + return this._name; + } + + get type() { + return this._type; + } + + get value() { + return this._value; + } + + get visible() { + return this._visible == false ? false : true; + } +}; + +tflite.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +tflite.Argument = class { + + constructor(format, index, tensor, initializer) { + this._location = index.toString(); + this._type = new tflite.TensorType(format, tensor); + this._initializer = initializer; + switch (format) { + default: { + this._name = tensor.name(); + const quantization = tensor.quantization(); + if (quantization) { + let value = 'q'; + const scale = (quantization.scaleLength() == 1) ? quantization.scale(0) : 0; + const zeroPoint = (quantization.zeroPointLength() == 1) ? quantization.zeroPoint(0).toFloat64() : 0; + if (scale != 0 || zeroPoint != 0) { + value = scale.toString() + ' * ' + (zeroPoint == 0 ? 
'q' : ('(q - ' + zeroPoint.toString() + ')')); + } + if (quantization.minLength() == 1) { + value = quantization.min(0).toString() + ' \u2264 ' + value; + } + if (quantization.maxLength() == 1) { + value = value + ' \u2264 ' + quantization.max(0).toString(); + } + if (value != 'q') { + this._quantization = value; + } + } + break; + } + case 'json': { + this._name = tensor.name || ''; + break; + } + } + } + + get name() { + return this._name; + } + + get location() { + return this._location; + } + + get type() { + return this._type; + } + + get quantization() { + return this._quantization; + } + + set description(value) { + this._description = value; + } + + get description() { + return this._description; + } + + get initializer() { + return this._initializer; + } +}; + +tflite.Tensor = class { + + constructor(format, index, tensor, buffer, is_variable) { + this._location = index.toString(); + this._type = new tflite.TensorType(format, tensor); + this._is_variable = is_variable; + switch (format) { + default: { + this._name = tensor.name(); + this._data = buffer.dataLength() > 0 ? buffer.dataArray() || [] : null; + break; + } + case 'json': { + this._name = tensor.name || ''; + this._data = buffer.data && buffer.data.length > 0 ? new Uint8Array(buffer.data) : null; + break; + } + } + } + + get kind() { + return this._is_variable ? 
'Variable' : ''; + } + + get name() { + return this._name; + } + + get location() { + return this._location; + } + + get type() { + return this._type; + } + + get state() { + return this._context().state; + } + + get value() { + const context = this._context(); + if (context.state) { + return null; + } + context.limit = Number.MAX_SAFE_INTEGER; + return this._decode(context, 0); + } + + toString() { + const context = this._context(); + if (context.state) { + return ''; + } + context.limit = 10000; + const value = this._decode(context, 0); + return JSON.stringify(value, null, 4); + } + + _context() { + const context = {}; + context.state = null; + context.index = 0; + context.count = 0; + + if (this._data == null) { + context.state = 'Tensor data is empty.'; + return context; + } + + context.dataType = this._type.dataType; + context.shape = this._type.shape.dimensions; + context.data = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength); + + if (this._type.dataType == 'string') { + let offset = 0; + const count = context.data.getInt32(0, true); + offset += 4; + const offsetTable = []; + for (let j = 0; j < count; j++) { + offsetTable.push(context.data.getInt32(offset, true)); + offset += 4; + } + offsetTable.push(this._data.length); + const stringTable = []; + const utf8Decoder = new TextDecoder('utf-8'); + for (let k = 0; k < count; k++) { + const textArray = this._data.subarray(offsetTable[k], offsetTable[k + 1]); + stringTable.push(utf8Decoder.decode(textArray)); + } + context.data = stringTable; + } + return context; + } + + _decode(context, dimension) { + const shape = (context.shape.length == 0) ? 
[ 1 ] : context.shape; + const size = shape[dimension]; + const results = []; + if (dimension == shape.length - 1) { + for (let i = 0; i < size; i++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + switch (context.dataType) { + case 'uint8': + results.push(context.data.getUint8(context.index)); + context.index += 1; + context.count++; + break; + case 'int8': + results.push(context.data.getInt8(context.index)); + context.index += 1; + context.count++; + break; + case 'int16': + results.push(context.data.getInt16(context.index)); + context.index += 2; + context.count++; + break; + case 'int32': + results.push(context.data.getInt32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'int64': + results.push(new long.Long(context.data.getUint32(context.index, true), context.data.getUint32(context.index + 4, true), false)); + context.index += 8; + context.count++; + break; + case 'float16': + results.push(context.data.getFloat16(context.index, true)); + context.index += 2; + context.count++; + break; + case 'float32': + results.push(context.data.getFloat32(context.index, true)); + context.index += 4; + context.count++; + break; + case 'float64': + results.push(context.data.getFloat64(context.index, true)); + context.index += 8; + context.count++; + break; + case 'string': + results.push(context.data[context.index++]); + context.count++; + break; + default: + break; + } + } + } + else { + for (let j = 0; j < size; j++) { + if (context.count > context.limit) { + results.push('...'); + return results; + } + results.push(this._decode(context, dimension + 1)); + } + } + if (context.shape.length == 0) { + return results[0]; + } + return results; + } +}; + +tflite.TensorType = class { + + constructor(format, tensor) { + switch (format) { + default: { + this._dataType = tflite.Utility.dataType(format, tensor.type()); + this._shape = new tflite.TensorShape(Array.from(tensor.shapeArray() || [])); + break; + } + 
case 'json': { + this._dataType = tflite.Utility.dataType(format, tensor.type); + this._shape = new tflite.TensorShape(tensor.shape || []); + break; + } + } + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + set denotation(value) { + this._denotation = value; + } + + get denotation() { + return this._denotation; + } + + toString() { + return this.dataType + this._shape.toString(); + } +}; + +tflite.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (!this._dimensions || this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']'; + } +}; + +tflite.Metadata = class { + + static open(host) { + if (tflite.Metadata._metadata) { + return Promise.resolve(tflite.Metadata._metadata); + } + return host.request(null, 'tflite-metadata.json', 'utf-8').then((data) => { + tflite.Metadata._metadata = new tflite.Metadata(data); + return tflite.Metadata._metadata; + }).catch(() => { + tflite.Metadata._metadata = new tflite.Metadata(null); + return tflite.Metadata._metadata; + }); + } + + constructor(data) { + this._map = new Map(); + if (data) { + const items = JSON.parse(data); + if (items) { + for (const item of items) { + item.schema.name = item.name; + this._map.set(item.name, item.schema); + } + } + } + } + + type(name) { + return this._map.has(name) ? 
this._map.get(name) : null; + } + + attribute(type, name) { + const schema = this.type(type); + if (schema) { + let attributeMap = schema.attributeMap; + if (!attributeMap) { + attributeMap = {}; + if (schema.attributes) { + for (const attribute of schema.attributes) { + attributeMap[attribute.name] = attribute; + } + } + schema.attributeMap = attributeMap; + } + const attributeSchema = attributeMap[name]; + if (attributeSchema) { + return attributeSchema; + } + } + return null; + } +}; + +tflite.Utility = class { + + static dataType(format, type) { + switch (format) { + default: { + if (!tflite.Utility._tensorTypeMap) { + tflite.Utility._tensorTypeMap = new Map(); + for (const name of Object.keys(tflite.schema.TensorType)) { + tflite.Utility._tensorTypeMap.set(tflite.schema.TensorType[name], name.toLowerCase()); + } + tflite.Utility._tensorTypeMap.set(6, 'boolean'); + } + return tflite.Utility._tensorTypeMap.has(type) ? tflite.Utility._tensorTypeMap.get(type) : '?'; + } + case 'json': { + switch (type) { + case 'BOOL': return 'boolean'; + default: return type.toLowerCase(); + } + } + } + } + + static enum(type, value) { + if (type && tflite.schema && tflite.schema[type]) { + if (!tflite.Utility._enumTypeMap) { + tflite.Utility._enumTypeMap = new Map(); + } + let typeMap = tflite.Utility._enumTypeMap.get(type); + if (!typeMap) { + typeMap = new Map(); + const enumType = tflite.schema[type]; + if (enumType) { + for (const key of Object.keys(enumType)) { + typeMap.set(enumType[key], key); + } + } + tflite.Utility._enumTypeMap.set(type, typeMap); + } + if (typeMap.has(value)) { + return typeMap.get(value); + } + } + return value; + } + + static type(name) { + const upperCase = new Set([ '2D', 'LSH', 'SVDF', 'RNN', 'L2', 'LSTM' ]); + if (name === 'BATCH_MATMUL') { + return "BatchMatMul"; + } + return name.split('_').map((s) => (s.length < 1 || upperCase.has(s)) ? 
s : s[0] + s.substring(1).toLowerCase()).join(''); + } +}; + +tflite.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading TensorFlow Lite model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = tflite.ModelFactory; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/torch-metadata.json b/frontend/packages/core/public/netron/torch-metadata.json new file mode 100644 index 00000000..bab13f8d --- /dev/null +++ b/frontend/packages/core/public/netron/torch-metadata.json @@ -0,0 +1,553 @@ +[ + { + "name": "nn.Linear", + "schema": { + "category": "Layer" + } + }, + { + "name": "nn.LinearNoBias", + "schema": { + "category": "Layer" + } + }, + { + "name": "nn.SpatialConvolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "benchmarked", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "padding", "default": 0 }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": false }, + { "name": "fmode", "visible": false }, + { "name": "bwmode", "visible": false }, + { "name": "bdmode", "visible": false } + ] + } + }, + { + "name": "cudnn.SpatialConvolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "benchmarked", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", "visible": false }, + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "padding", 
"default": 0 }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": false }, + { "name": "fmode", "visible": false }, + { "name": "bwmode", "visible": false }, + { "name": "bdmode", "visible": false } + ] + } + }, + { + "name": "nn.VolumetricConvolution", + "schema": { + "category": "Layer" + } + }, + { + "name": "nn.SpatialConvolutionMM", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "padding", "default": 0 }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false } + ] + } + }, + { + "name": "nn.SpatialFullConvolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": false }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", "visible": false } + ] + } + }, + { + "name": "cudnn.SpatialFullConvolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "groups", "default": 1 }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "convDescData", "visible": false }, + { "name": "autotunerHash", "visible": false }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false }, + { "name": "input_offset", "visible": false }, + { "name": "output_offset", "visible": false }, + { "name": "weight_offset", 
"visible": false } + ] + } + }, + { + "name": "nn.SpatialDilatedConvolution", + "schema": { + "category": "Layer", + "attributes": [ + { "name": "d", "default": [ 1, 1 ] }, + { "name": "dilation", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "nInputPlane", "visible": false }, + { "name": "nOutputPlane", "visible": false } + ] + } + }, + { + "name": "nn.SpatialSubtractiveNormalization", + "schema": { + "category": "Normalization" + } + }, + { + "name": "nn.InstanceNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "nOutput", "visible": false }, + { "name": "prev_batch_size", "visible": false }, + { "name": "eps", "default": 0.00001 }, + { "name": "momentum", "default": 0.1 } + ] + } + }, + { + "name": "nn.BatchNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 } + ] + } + }, + { + "name": "cudnn.BatchNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 } + ] + } + }, + { + "name": "nn.SpatialBatchNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 }, + { "name": "mode", "default": "CUDNN_BATCHNORM_SPATIAL" }, + { "name": "nDim", "default": 4 }, + { "name": "__shareGradInputKey", "visible": false } + ] + } + }, + { + "name": "cudnn.SpatialBatchNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 }, + { "name": "mode", "default": "CUDNN_BATCHNORM_SPATIAL" }, + { "name": "nDim", 
"default": 4 }, + { "name": "__shareGradInputKey", "visible": false } + ] + } + }, + { + "name": "nn.VolumetricBatchNormalization", + "schema": { + "category": "Normalization", + "attributes": [ + { "name": "affine", "default": true }, + { "name": "momentum", "default": 0.1 }, + { "name": "eps", "default": 0.00001 } + ] + } + }, + { + "name": "nn.SpatialAveragePooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING" }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "count_include_pad", "visible": false } + ] + } + }, + { + "name": "cudnn.SpatialAveragePooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING" }, + { "name": "d", "default": [ 1, 1 ] }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "count_include_pad", "visible": false } + ] + } + }, + { + "name": "nn.VolumetricAveragePooling", + "schema": { + "category": "Pool" + } + }, + { + "name": "nn.SpatialMaxPooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_MAX" }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "iheight", "visible": false }, + { "name": "iwidth", "visible": false } + ] + } + }, + { + "name": "cudnn.SpatialMaxPooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": "CUDNN_POOLING_MAX" }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "iheight", "visible": false }, + { "name": "iwidth", "visible": false } + ] + } + }, + { + "name": "inn.SpatialMaxPooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false }, + { "name": "mode", "default": 
"CUDNN_POOLING_MAX" }, + { "name": "pad", "default": [ 0, 0 ] }, + { "name": "iheight", "visible": false }, + { "name": "iwidth", "visible": false } + ] + } + }, + { + "name": "nn.VolumetricMaxPooling", + "schema": { + "category": "Pool", + "attributes": [ + { "name": "ceil_mode", "default": false } + ] + } + }, + { + "name": "nn.SpatialFractionalMaxPooling", + "schema": { + "category": "Pool" + } + }, + { + "name": "nn.SpatialZeroPadding", + "schema": { + "category": "Tensor", + "attributes": [ + ] + } + }, + { + "name": "nn.SpatialReflectionPadding", + "schema": { + "category": "Tensor", + "attributes": [ + ] + } + }, + { + "name": "nn.SpatialReplicationPadding", + "schema": { + "category": "Tensor", + "attributes": [ + ] + } + }, + { + "name": "nn.Concat", + "schema": { + "category": "Tensor", + "attributes": [ + { "name": "outputSize", "visible": false } + ] + } + }, + { + "name": "nn.PReLU", + "schema": { + "category": "Activation" + } + }, + { + "name": "nn.ReLU", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "threshold", "default": 0 }, + { "name": "val", "default": 0 }, + { "name": "inplace", "default": false, "visible": false }, + { "name": "mode", "default": "CUDNN_ACTIVATION_RELU" }, + { "name": "nElem", "visible": false } + ] + } + }, + { + "name": "cudnn.ReLU", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "threshold", "default": 0 }, + { "name": "val", "default": 0 }, + { "name": "inplace", "default": false, "visible": false }, + { "name": "mode", "default": "CUDNN_ACTIVATION_RELU" }, + { "name": "nElem", "visible": false } + ] + } + }, + { + "name": "nn.SoftMax", + "schema": { + "category": "Activation" + } + }, + { + "name": "nn.LogSoftMax", + "schema": { + "category": "Activation" + } + }, + { + "name": "cudnn.LogSoftMax", + "schema": { + "category": "Activation" + } + }, + { + "name": "nn.Tanh", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "mode", "default": 
"CUDNN_ACTIVATION_TANH" }, + { "name": "nElem", "visible": false } + ] + } + }, + { + "name": "nn.LeakyReLU", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "negval", "default": 0.01, "visible": false }, + { "name": "inplace", "default": false, "visible": false } + ] + } + }, + { + "name": "nn.Sigmoid", + "schema": { + "category": "Activation", + "attributes": [ + { "name": "mode", "default": "CUDNN_ACTIVATION_SIGMOID" }, + { "name": "nElem", "visible": false } + ] + } + }, + { + "name": "nn.Reshape", + "schema": { + "category": "Shape", + "attributes": [ + { "name": "nelement", "visible": false } + ] + } + }, + { + "name": "nn.Dropout", + "schema": { + "category": "Dropout", + "attributes": [ + { "name": "v2", "visible": false } + ] + } + }, + { + "name": "nn.SpatialDropout", + "schema": { + "category": "Dropout" + } + }, + { + "name": "nn.Normalize", + "schema": { + "category": "Normalization", + "attributes": [ + ] + } + }, + { + "name": "nn.Normalize2", + "schema": { + "category": "Normalization", + "attributes": [ + ] + } + }, + { + "name": "nn.SpatialCrossMapLRN", + "schema": { + "category": "Normalization", + "attributes": [ + ] + } + }, + { + "name": "nn.Mean", + "schema": { + "attributes": [ + { "name": "squeeze", "default": true }, + { "name": "sizeAverage", "default": false }, + { "name": "dimension", "default": 1 }, + { "name": "nInputDims", "visible": false } + ] + } + }, + { + "name": "nn.MulConstant", + "schema": { + "attributes": [ + { "name": "inplace", "default": false, "visible": false } + ] + } + }, + { + "name": "nn.Identity", + "schema": { + } + }, + { + "name": "nn.ConcatTable", + "schema": { + } + }, + { + "name": "nn.CAddTable", + "schema": { + } + }, + { + "name": "nn.ScaleTable", + "schema": { + } + }, + { + "name": "nn.SelectTable", + "schema": { + } + }, + { + "name": "w2nn.ScaleTable", + "schema": { + } + }, + { + "name": "nn.SplitTable", + "schema": { + } + }, + { + "name": "nn.FlattenTable", + "schema": { + } 
+ }, + { + "name": "nn.View", + "schema": { + } + }, + { + "name": "nn.Sequencer", + "schema": { + } + }, + { + "name": "nn.TotalVariation", + "schema": { + } + }, + { + "name": "nn.ShaveImage", + "schema": { + } + }, + { + "name": "nn.Contiguous", + "schema": { + } + }, + { + "name": "nn.Squeeze", + "schema": { + "category": "Transform" + } + }, + { + "name": "nn.MM", + "schema": { + } + } +] \ No newline at end of file diff --git a/frontend/packages/core/public/netron/torch.js b/frontend/packages/core/public/netron/torch.js new file mode 100644 index 00000000..bcff8095 --- /dev/null +++ b/frontend/packages/core/public/netron/torch.js @@ -0,0 +1,1239 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var torch = torch || {}; +var base = base || require('./base'); +var long = long || { Long: require('long') }; + +torch.ModelFactory = class { + + match(context) { + const extension = context.identifier.split('.').pop().toLowerCase(); + if (extension == 't7') { + const buffer = context.buffer; + if (buffer.length >= 1 && buffer[0] > 58) { + return false; + } + return true; + } + return false; + } + + open(context, host) { + return torch.Metadata.open(host).then((metadata) => { + const identifier = context.identifier; + try { + const reader = new torch.T7Reader(context.buffer, (name) => { + if (name && name != 'nn.JointTrainModule' && !name.startsWith('nn.MSDNet_') && !name.startsWith('onmt.')) { + host.exception(new torch.Error("Unknown type '" + name + "' in '" + identifier + "'."), false); + } + return null; + }); + let root = reader.read(); + if (root && Array.isArray(root) && root.length == 2 && root[0].__type__ && !root[1].__type__) { + root = root[0]; + } + return new torch.Model(metadata, root); + } + catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + throw new torch.Error(message.replace(/\.$/, '') + " in '" + identifier + "'."); + } + }); + } +}; + +torch.Model = class { + + constructor(metadata, root) { + this._graphs = []; + this._graphs.push(new torch.Graph(metadata, root)); + } + + get graphs() { + return this._graphs; + } + + get format() { + return 'Torch v7'; + } +}; + +torch.Graph = class { + + constructor(metadata, root) { + this._inputs = []; + this._outputs = []; + this._nodes = []; + this._groups = 'false'; + + if (Object.prototype.hasOwnProperty.call(root, 'model')) { + root = root.model; + } + + let inputs = []; + let outputs = []; + this._loadModule(metadata, root, [], '', inputs, outputs); + + this._inputs = this._inputs.concat(inputs.map((input, index) => { + return new torch.Parameter('input' + (index != 0 ? (index + 1).toString() : ''), true, [ input ]); + })); + this._outputs = this._outputs.concat(outputs.map((output, index) => { + return new torch.Parameter('output' + (index != 0 ? 
(index + 1).toString() : ''), true, [ output ]); + })); + } + + get inputs() { + return this._inputs; + } + + get outputs() { + return this._outputs; + } + + get nodes() { + return this._nodes; + } + + get groups() { + return this._groups; + } + + _loadModule(metadata, module, groups, key, inputs, outputs) { + if (groups.length > 0) { + this._groups = true; + } + switch (module.__type__) { + case 'nn.Sequential': { + groups.push(key); + let subInputs = inputs; + let subOutputs = []; + const length = module.modules.length; + let index = 0; + for (const subModule of module.modules) { + if (index == length - 1) { + subOutputs = outputs; + } + this._loadModule(metadata, subModule, groups, index.toString(), subInputs, subOutputs); + subInputs = subOutputs; + subOutputs = []; + index++; + } + groups.pop(); + break; + } + case 'nn.Parallel': + case 'nn.ParallelTable': + case 'nn.JointTrain': { + groups.push(key); + let newInputs = []; + let newOutputs = []; + let index = 0; + for (const subModule of module.modules) { + let subInputs = [].concat(inputs); + let subOutputs = [].concat(outputs); + this._loadModule(metadata, subModule, groups, index.toString(), subInputs, subOutputs); + if (inputs.length == 0) { + newInputs = newInputs.concat(subInputs); + } + if (outputs.length == 0) { + newOutputs = newOutputs.concat(subOutputs); + } + index++; + } + inputs = inputs.concat(newInputs); + for (const newOutput of newOutputs) { + outputs.push(newOutput); + } + groups.pop(); + break; + } + case 'nn.Concat': + case 'nn.ConcatTable': { + const prefix = key; + if (inputs.length == 0) { + inputs.push(new torch.Argument(groups.join('/') + ':' + key + ':in', null, null)); + } + let concatInputs = []; + let index = 0; + for (const subModule of module.modules) { + let streamInputs = inputs.map((input) => input); + let streamOutputs = []; + this._loadModule(metadata, subModule, groups, prefix + '.' 
+ index.toString(), streamInputs, streamOutputs); + concatInputs = concatInputs.concat(streamOutputs); + index++; + } + delete module.modules; + delete module.dimension; + this._createNode(metadata, module, groups, key, concatInputs, outputs); + break; + } + case 'nn.Inception': { + delete module.modules; // TODO + delete module.module; // TODO + delete module.transfer; // TODO + delete module.pool; // TODO + this._createNode(metadata, module, groups, key, inputs, outputs); + break; + } + default: { + this._createNode(metadata, module, groups, key, inputs, outputs); + break; + } + } + } + + _createNode(metadata, module, group, subIndex, inputs, outputs) { + this._nodes.push(new torch.Node(metadata, module, group, subIndex, inputs, outputs)); + } +}; + +torch.Parameter = class { + + constructor(name, visible, args) { + this._name = name; + this._visible = visible; + this._arguments = args; + } + + get name() { + return this._name; + } + + get visible() { + return this._visible; + } + + get arguments() { + return this._arguments; + } +}; + +torch.Argument = class { + + constructor(name, type, initializer) { + if (typeof name !== 'string') { + throw new torch.Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + this._name = name; + this._type = type; + this._initializer = initializer; + } + + get name() { + return this._name; + } + + get type() { + if (this._initializer) { + return this._initializer.type; + } + return this._type; + } + + get initializer() { + return this._initializer; + } +}; + +torch.Node = class { + + constructor(metadata, module, groups, name, inputs, outputs) { + this._metadata = metadata; + this._group = groups.join('/'); + if (module.name && typeof module.name === 'string') { + this._name = module.name; + delete module.name; + } + else { + this._name = this._group ? 
(this._group + ':' + name) : name; + } + this._type = module.__type__ || 'nn.Module'; + let initializers = []; + for (const key of Object.keys(module)) { + const obj = module[key]; + if (obj && obj.__type__ && obj.__type__.startsWith('torch.') && obj.__type__.endsWith('Storage')) { + let array = []; + obj.reset(); + for (let i = 0; i < obj.size; i++) { + array.push(obj.read()); + } + module[key] = array; + } + } + delete module.iSize; + delete module.finput; + delete module.fgradInput; + delete module.output; + delete module.gradInput; + delete module.gradWeight; + delete module.gradBias; + delete module.grad_tmp; + delete module.scaleT; + delete module._input; + delete module._output; + delete module._gradInput; + delete module._gradOutput; + delete module.buffer; + delete module.buffer2; + delete module.tmp_in; + delete module.tmp_out; + delete module.accUpdateGradParameters; + switch (this._type) { + case 'nn.Linear': + delete module.addBuffer; + break; + case 'nn.Normalize': + case 'nn.Normalize2': + delete module.addBuffer; + delete module.normp; + delete module.norm; + break; + case 'cudnn.SpatialConvolution': + case 'cudnn.SpatialFullConvolution': + case 'nn.SpatialConvolution': + case 'nn.SpatialConvolutionMM': + case 'nn.SpatialDilatedConvolution': + case 'nn.SpatialFullConvolution': + delete module.ones; + delete module.input_slice; + delete module.output_slice; + delete module.convDescData; + this._updateSize(module, 'adj'); + this._updateSize(module, 'd'); + this._updateSize(module, 'dilation'); + this._updateSize(module, 'k'); + this._updateSize(module, 'pad'); + break; + case 'cudnn.BatchNormalization': + case 'cudnn.SpatialBatchNormalization': + case 'nn.BatchNormalization': + case 'nn.SpatialBatchNormalization': + case 'nn.InstanceNormalization': + delete module.save_mean; + delete module.save_std; + delete module.gradWeight; + delete module.normalized; + delete module.centered; + delete module.bn; // TODO InstanceNormalization + break; + case 
'nn.SpatialCrossMapLRN': + delete module.scale; + break; + case 'cudnn.SpatialMaxPooling': + case 'cudnn.SpatialAveragePooling': + case 'inn.SpatialMaxPooling': + case 'nn.SpatialMaxPooling': + case 'nn.SpatialAveragePooling': + delete module.indices; + this._updateSize(module, 'pad'); + this._updateSize(module, 'd'); + this._updateSize(module, 'k'); + break; + case 'nn.SpatialZeroPadding': + case 'nn.SpatialReflectionPadding': + case 'nn.SpatialReplicationPadding': + this._updateBox(module, 'pad'); + break; + case 'nn.Dropout': + delete module.noise; + break; + case 'nn.gModule': + delete module.forwardnodes; + delete module.backwardnodes; + break; + case 'nn.StereoJoin': + delete module.output_L; + break; + } + this._attributes = []; + if (module.__type__) { + for (const key of Object.keys(module)) { + if (key == '__type__' || key == '_type') { + continue; + } + const obj = module[key]; + if (Array.isArray(obj) && obj.every(((item) => item && item.__type__ && item.__type__.startsWith('nn.')))) { + continue; + } + if (obj.__type__ && obj.__type__.startsWith('torch.') && obj.__type__.endsWith('Tensor')) { + initializers.push(new torch.Parameter(key, true, [ + new torch.Argument(key, null, new torch.Tensor(obj)) + ])); + continue; + } + if (key == 'modules' || (obj.__type__ && obj.__type__ != 'Function')) { + continue; + } + this._attributes.push(new torch.Attribute(this._metadata, this._type, key, obj)); + } + } + this._inputs = []; + if (inputs.length == 0 && this._name) { + inputs.push(new torch.Argument(this._name + ':in', null, null)); + } + this._inputs.push(new torch.Parameter('input', true, inputs)); + if (outputs.length == 0 && this._name) { + outputs.push(new torch.Argument(this._name, null, null)); + } + this._outputs = []; + this._outputs.push(new torch.Parameter('output', true, outputs)); + initializers = initializers.filter((argument) => { + if (argument.name == 'weight') { + this._inputs.push(argument); + return false; + } + return true; + }); + 
torch.Attribute = class {

    // A single named attribute of a torch.Node. Visibility is resolved against
    // the operator schema: explicit schema 'visible' flags and values equal to
    // the schema default are hidden to reduce graph noise.
    constructor(metadata, type, name, value) {
        this._name = name;
        this._value = value;
        // 'train' is runtime state (train vs. evaluation mode), not a model
        // hyper-parameter, so it is hidden by default.
        if (name == 'train') {
            this._visible = false;
        }
        const schema = metadata.attribute(type, name);
        if (schema) {
            if (Object.prototype.hasOwnProperty.call(schema, 'visible')) {
                this._visible = schema.visible;
            }
            else if (Object.prototype.hasOwnProperty.call(schema, 'default')) {
                // Hide attributes that still carry the schema default value.
                if (JSON.stringify(schema.default) == JSON.stringify(this._value)) {
                    this._visible = false;
                }
            }
        }
    }

    get name() {
        return this._name;
    }

    get value() {
        return this._value;
    }

    get visible() {
        // Undefined means "no reason to hide" and is treated as visible.
        return this._visible == false ? false : true;
    }
};

torch.Tensor = class {

    // Read-only view over a deserialized Torch7 tensor. Decoding is lazy:
    // the underlying storage is only traversed when 'value' or 'toString'
    // is requested.
    constructor(tensor) {
        this._type = new torch.TensorType(tensor);
        this._storage = tensor.storage;
    }

    get type() {
        return this._type;
    }

    // Human-readable reason why the tensor cannot be decoded, or null.
    get state() {
        return this._context().state || null;
    }

    // Fully decoded nested array of values, or null when not decodable.
    get value() {
        let context = this._context();
        if (context.state) {
            return null;
        }
        context.limit = Number.MAX_SAFE_INTEGER;
        return this._decode(context, 0);
    }

    // Truncated JSON preview (at most ~1000 elements, remainder as '...').
    toString() {
        let context = this._context();
        if (context.state) {
            return '';
        }
        context.limit = 1000;
        const value = this._decode(context, 0);
        return JSON.stringify(value, null, 4);
    }

    // Builds the decoding context and validates storage, data type and shape.
    _context() {
        let context = {};
        context.state = null;
        context.index = 0;
        context.count = 0;
        if (!this._storage || !this._storage.reader) {
            context.state = 'Tensor data is empty.';
            return context;
        }
        switch (this._type.dataType) {
            case 'uint8':
            case 'int8':
            case 'int16':
            case 'int32':
            case 'int64':
            case 'float32':
            case 'float64':
                break;
            default:
                context.state = 'Tensor data type is not implemented.';
                break;
        }
        context.dimensions = this._type.shape.dimensions;
        // BUG FIX: the original condition used '&&', so a missing dimensions
        // array crashed dereferencing '.length' and an empty one was never
        // caught. '||' implements the intended "no dimensions" guard.
        if (!context.dimensions || context.dimensions.length == 0) {
            context.state = 'Tensor has no dimensions.';
            return context;
        }
        context.storage = this._storage;
        context.storage.reset();
        return context;
    }

    // Recursively reads one dimension level, honoring context.limit.
    _decode(context, dimension) {
        let results = [];
        const size = context.dimensions[dimension];
        if (dimension == context.dimensions.length - 1) {
            for (let i = 0; i < size; i++) {
                if (context.count > context.limit) {
                    results.push('...');
                    return results;
                }
                results.push(context.storage.read());
                context.index++;
                context.count++;
            }
        }
        else {
            for (let j = 0; j < size; j++) {
                if (context.count > context.limit) {
                    results.push('...');
                    return results;
                }
                results.push(this._decode(context, dimension + 1));
            }
        }
        return results;
    }
};

torch.TensorType = class {

    // Element data type plus shape, rendered 'float32[3,224,224]' style.
    constructor(tensor) {
        this._dataType = tensor.dataType;
        this._shape = new torch.TensorShape(tensor.size);
    }

    get dataType() {
        return this._dataType;
    }

    get shape() {
        return this._shape;
    }

    toString() {
        return (this.dataType || '?') + this._shape.toString();
    }
};
constructor(tensor) { + this._dataType = tensor.dataType; + this._shape = new torch.TensorShape(tensor.size); + } + + get dataType() { + return this._dataType; + } + + get shape() { + return this._shape; + } + + toString() { + return (this.dataType || '?') + this._shape.toString(); + } +}; + +torch.TensorShape = class { + + constructor(dimensions) { + this._dimensions = dimensions; + } + + get dimensions() { + return this._dimensions; + } + + toString() { + if (this._dimensions) { + if (this._dimensions.length == 0) { + return ''; + } + return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']'; + } + return ''; + } +}; + + +torch.Metadata = class { + + static open(host) { + if (torch.Metadata._metadata) { + return Promise.resolve(torch.Metadata._metadata); + } + return host.request(null, 'torch-metadata.json', 'utf-8').then((data) => { + torch.Metadata._metadata = new torch.Metadata(data); + return torch.Metadata._metadata; + }).catch(() => { + torch.Metadata._metadata = new torch.Metadata(null); + return torch.Metadata._metadata; + }); + } + + constructor(data) { + this._map = {}; + this._attributeCache = {}; + if (data) { + let items = JSON.parse(data); + if (items) { + for (const item of items) { + item.schema.name = item.name; + this._map[item.name] = item.schema; + } + } + } + } + + type(name) { + return this._map[name] || null; + } + + attribute(type, name) { + let map = this._attributeCache[type]; + if (!map) { + map = {}; + const schema = this.type(type); + if (schema && schema.attributes && schema.attributes.length > 0) { + for (const attribute of schema.attributes) { + map[attribute.name] = attribute; + } + } + this._attributeCache[type] = map; + } + return map[name] || null; + } +}; + +torch.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading Torch model.'; + } +}; + +torch.T7Reader = class { + + constructor(buffer, callback) { + this._callback = callback; + this._memo = new 
Map(); + + this._registry = {}; + this._registry['bnn.Binary'] = function(reader) { reader.nn(this); }; + this._registry['bnn.SpatialConvolution'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.BatchNormalization'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.ReLU'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.Sigmoid'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.SoftMax'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.LogSoftMax'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.SpatialAveragePooling'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.SpatialBatchNormalization'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.SpatialConvolution'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.SpatialFullConvolution'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.SpatialMaxPooling'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.SpatialSoftMax'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.Tanh'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.VolumetricAveragePooling'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.VolumetricBatchNormalization'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.VolumetricConvolution'] = function(reader) { reader.nn(this); }; + this._registry['cudnn.VolumetricMaxPooling'] = function(reader) { reader.nn(this); }; + this._registry['Dict'] = function(reader) { reader.nn(this); }; + this._registry['inn.ConstAffine'] = function(reader) { reader.nn(this); }; + this._registry['inn.SpatialMaxPooling'] = function(reader) { reader.nn(this); }; + this._registry['nn.Abs'] = function(reader) { reader.nn(this); }; + this._registry['nn.AddConstant'] = function(reader) { reader.nn(this); }; + this._registry['nn.BatchNormalization'] = function(reader) { reader.nn(this); }; + 
this._registry['nn.BilinearSamplerBHWD'] = function(reader) { reader.nn(this); }; + this._registry['nn.BinActiveZ'] = function(reader) { reader.nn(this); }; // allenai/XNOR-Net + this._registry['nn.BCECriterion'] = function(reader) { reader.nn(this); }; + this._registry['nn.CMul'] = function(reader) { reader.nn(this); }; + this._registry['nn.CAddTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.CDivTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.CMulTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.CSubTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.Concat'] = function(reader) { reader.nn(this); }; + this._registry['nn.Copy'] = function(reader) { reader.nn(this); }; + this._registry['nn.ConcatTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.Contiguous'] = function(reader) { reader.nn(this); }; + this._registry['nn.Constant'] = function(reader) { reader.nn(this); }; + this._registry['nn.CostVolMulti'] = function(reader) { reader.nn(this); }; + this._registry['nn.DepthConcat'] = function(reader) { reader.nn(this); }; + this._registry['nn.Dropout'] = function(reader) { reader.nn(this); }; + this._registry['nn.Exp'] = function(reader) { reader.nn(this); }; + this._registry['nn.ExpOut'] = function(reader) { reader.nn(this); }; + this._registry['nn.FlattenTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.GenNoise'] = function(reader) { reader.nn(this); }; + this._registry['nn.Identity'] = function(reader) { reader.nn(this); }; + this._registry['nn.Index'] = function(reader) { reader.nn(this); }; + this._registry['nn.Inception'] = function(reader) { reader.nn(this); }; + this._registry['nn.InstanceNormalization'] = function(reader) { reader.nn(this); }; + this._registry['nn.JoinTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.JointTrain'] = function(reader) { reader.nn(this); }; + this._registry['nn.KeypointCoordinate'] = 
function(reader) { reader.nn(this); }; + this._registry['nn.LeakyReLU'] = function(reader) { reader.nn(this); }; + this._registry['nn.Linear'] = function(reader) { reader.nn(this); }; + this._registry['nn.LinearNoBias'] = function(reader) { reader.nn(this); }; + this._registry['nn.LogSoftMax'] = function(reader) { reader.nn(this); }; + this._registry['nn.LookupTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.LSTM'] = function(reader) { reader.nn(this); }; + this._registry['nn.MaskZero'] = function(reader) { reader.nn(this); }; + this._registry['nn.MapTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.Max'] = function(reader) { reader.nn(this); }; + this._registry['nn.Mean'] = function(reader) { reader.nn(this); }; + this._registry['nn.Min'] = function(reader) { reader.nn(this); }; + this._registry['nn.MulConstant'] = function(reader) { reader.nn(this); }; + this._registry['nn.MM'] = function(reader) { reader.nn(this); }; + this._registry['nn.MSECriterion'] = function(reader) { reader.nn(this); }; + this._registry['nn.Narrow'] = function(reader) { reader.nn(this); }; + this._registry['nn.NarrowTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.Normalize'] = function(reader) { reader.nn(this); }; + this._registry['nn.Normalize2'] = function(reader) { reader.nn(this); }; + this._registry['nn.NoiseFill'] = function(reader) { reader.nn(this); }; + this._registry['nn.Padding'] = function(reader) { reader.nn(this); }; + this._registry['nn.Parallel'] = function(reader) { reader.nn(this); }; + this._registry['nn.ParallelCriterion'] = function(reader) { reader.nn(this); }; + this._registry['nn.ParallelTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.PixelShuffle'] = function(reader) { reader.nn(this); }; + this._registry['nn.Power'] = function(reader) { reader.nn(this); }; + this._registry['nn.PReLU'] = function(reader) { reader.nn(this); }; + this._registry['nn.Recursor'] = function(reader) { 
reader.nn(this); }; + this._registry['nn.ReLU'] = function(reader) { reader.nn(this); }; + this._registry['nn.Replicate'] = function(reader) { reader.nn(this); }; + this._registry['nn.Reshape'] = function(reader) { reader.nn(this); }; + this._registry['nn.ShaveImage'] = function(reader) { reader.nn(this); }; + this._registry['nn.Select'] = function(reader) { reader.nn(this); }; + this._registry['nn.SelectTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.Sequencer'] = function(reader) { reader.nn(this); }; + this._registry['nn.Sequential'] = function(reader) { reader.nn(this); }; + this._registry['nn.Sigmoid'] = function(reader) { reader.nn(this); }; + this._registry['nn.Sum'] = function(reader) { reader.nn(this); }; + this._registry['nn.SoftMax'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialAveragePooling'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialBatchNormalization'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialConvolution'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialConvolutionMM'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialCrossMapLRN'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialDilatedConvolution'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialDropout'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialFractionalMaxPooling'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialFullConvolution'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialLPPooling'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialMaxPooling'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialReflectionPadding'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialReplicationPadding'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialSoftMax'] = function(reader) { 
reader.nn(this); }; + this._registry['nn.SpatialSubtractiveNormalization'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialUpSamplingBilinear'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialUpSamplingNearest'] = function(reader) { reader.nn(this); }; + this._registry['nn.SpatialZeroPadding'] = function(reader) { reader.nn(this); }; + this._registry['nn.SplitTable'] = function(reader) { reader.nn(this); }; + this._registry['nn.Squeeze'] = function(reader) { reader.nn(this); }; + this._registry['nn.Square'] = function(reader) { reader.nn(this); }; + this._registry['nn.Sqrt'] = function(reader) { reader.nn(this); }; + this._registry['nn.StereoJoin'] = function(reader) { reader.nn(this); }; + this._registry['nn.Tanh'] = function(reader) { reader.nn(this); }; + this._registry['nn.Transpose'] = function(reader) { reader.nn(this); }; + this._registry['nn.TotalVariation'] = function(reader) { reader.nn(this); }; + this._registry['nn.Unpool'] = function(reader) { reader.nn(this); }; + this._registry['nn.View'] = function(reader) { reader.nn(this); }; + this._registry['nn.gModule'] = function(reader) { reader.nn(this); }; + this._registry['nngraph.Node'] = function(reader) { reader.nn(this); }; + this._registry['graph.Edge'] = function(reader) { reader.nn(this); }; + this._registry['graph.Graph'] = function(reader) { reader.nn(this); }; + this._registry['torch.ByteTensor'] = function(reader) { reader.tensor(this, 'uint8'); }; + this._registry['torch.CharTensor'] = function(reader) { reader.tensor(this, 'int8'); }; + this._registry['torch.ShortTensor'] = function(reader) { reader.tensor(this, 'int16'); }; + this._registry['torch.IntTensor'] = function(reader) { reader.tensor(this, 'int32'); }; + this._registry['torch.LongTensor'] = function(reader) { reader.tensor(this, 'int64'); }; + this._registry['torch.FloatTensor'] = function(reader) { reader.tensor(this, 'float32'); }; + this._registry['torch.DoubleTensor'] = function(reader) 
{ reader.tensor(this, 'float64'); }; + this._registry['torch.CudaByteTensor'] = function(reader) { reader.tensor(this, 'uint8'); }; + this._registry['torch.CudaCharTensor'] = function(reader) { reader.tensor(this, 'int8'); }; + this._registry['torch.CudaShortTensor'] = function(reader) { reader.tensor(this, 'int16'); }; + this._registry['torch.CudaIntTensor'] = function(reader) { reader.tensor(this, 'int32'); }; + this._registry['torch.CudaLongTensor'] = function(reader) { reader.tensor(this, 'int64'); }; + this._registry['torch.CudaTensor'] = function(reader) { reader.tensor(this, 'float32'); }; + this._registry['torch.CudaDoubleTensor'] = function(reader) { reader.tensor(this, 'float64'); }; + this._registry['torch.ByteStorage'] = function(reader) { reader.storage(this, 'uint8', 1); }; + this._registry['torch.CharStorage'] = function(reader) { reader.storage(this, 'int8', 1); }; + this._registry['torch.ShortStorage'] = function(reader) { reader.storage(this, 'int16', 2); }; + this._registry['torch.IntStorage'] = function(reader) { reader.storage(this, 'int32', 4); }; + this._registry['torch.LongStorage'] = function(reader) { reader.storage(this, 'int64', 8); }; + this._registry['torch.FloatStorage'] = function(reader) { reader.storage(this, 'float32', 4); }; + this._registry['torch.DoubleStorage'] = function(reader) { reader.storage(this, 'float64', 8); }; + this._registry['torch.CudaByteStorage'] = function(reader) { reader.storage(this, 'uint8', 1); }; + this._registry['torch.CudaCharStorage'] = function(reader) { reader.storage(this, 'int8', 1); }; + this._registry['torch.CudaShortStorage'] = function(reader) { reader.storage(this, 'int16', 2); }; + this._registry['torch.CudaIntStorage'] = function(reader) { reader.storage(this, 'int32', 4); }; + this._registry['torch.CudaLongStorage'] = function(reader) { reader.storage(this, 'int64', 8); }; + this._registry['torch.CudaIntStorage'] = function(reader) { reader.storage(this, 'int32', 4); }; + 
this._registry['torch.CudaStorage'] = function(reader) { reader.storage(this, 'float32', 4); }; + this._registry['torch.CudaFloatStorage'] = function(reader) { reader.storage(this, 'float64', 8); }; + this._registry['w2nn.AuxiliaryLossTable'] = function(reader) { reader.nn(this); }; + this._registry['w2nn.InplaceClip01'] = function(reader) { reader.nn(this); }; + this._registry['w2nn.ScaleTable'] = function(reader) { reader.nn(this); }; + + if (buffer.length == 0) { + throw new torch.Error('File is empty.'); + } + if (buffer[0] <= 8) { + this._reader = new torch.BinaryReader(buffer); + } + else { + this._reader = new torch.TextReader(buffer); + this._reader.int32(); + this._reader.reset(); + } + } + + read() { + const type = this.int32(); + switch (type) { + case 0: return null; + case 1: return this.float64(); + case 2: return this.string(); + case 3: return this.table(); + case 4: return this.object(); + case 5: return this.boolean(); + case 6: return this.function(); + case 7: return this.function(); + case 8: return this.function(); + default: throw new torch.Error("File format has invalid type '" + type + "'."); + } + } + + boolean() { + return this._reader.boolean(); + } + + bytes(size) { + return this._reader.bytes(size); + } + + int32() { + return this._reader.int32(); + } + + int64() { + return this._reader.int64(); + } + + int64s(size) { + return this._reader.int64s(size); + } + + float64() { + return this._reader.float64(); + } + + string() { + return this._reader.string(); + } + + object() { + let index = this.int32(); + if (this._memo.has(index)) { + return this._memo.get(index); + } + + let version = this.string(); + let name = null; + if (version.startsWith('V ')) { + name = this.string(); + version = Number(version.split(' ')[1]); + } + else { + name = version; + version = 0; + } + + let obj = { __type__: name }; + this._memo.set(index, obj); + + let constructor = this._registry[name]; + if (constructor) { + constructor.apply(obj, [ this, version 
]); + } + else { + constructor = this._callback(name); + if (constructor) { + constructor.apply(obj, [ this, version ]); + } + this.nn(obj); + } + return obj; + } + + table() { + const index = this.int32(); + if (this._memo.has(index)) { + return this._memo.get(index); + } + let table = {}; + this._memo.set(index, table); + const size = this.int32(); + let convert = true; + let sum = 0; + for (let i = 0; i < size; i++) { + let key = this.read(); + let value = this.read(); + table[key] = value; + if (Number.isInteger(key) && key >= 0) { + sum += key; + } + else { + convert = false; + } + } + let n = Object.keys(table).length; + if (convert && (n * (n + 1)) == (2 * sum)) { + let list = []; + for (let j = 0; j < n; j++) { + let item = table[j + 1]; + if (item == table) { + item = list; + } + list.push(item); + } + this._memo.set(index, list); + return list; + } + return table; + } + + function() { + const index = this.int32(); + if (this._memo.has(index)) { + return this._memo.get(index); + } + const size = this.int32(); + const dumped = this.bytes(size); + const upvalues = this.read(); + const func = { __type__: 'Function', size: size, dumped: dumped, upvalues: upvalues }; + this._memo.set(index, func); + return func; + } + + nn(obj) { + const attributes = this.read(); + if (attributes != null) { + for (const key of Object.keys(attributes)) { + obj[key] = attributes[key]; + } + } + } + + tensor(obj, dataType) { + const dim = this.int32(); + obj.dataType = dataType; + obj.size = this.int64s(dim); + obj.stride = this.int64s(dim); + obj.storage_offset = this.int64() - 1; + obj.storage = this.read(); + } + + storage(obj, dataType, itemSize) { + obj.dataType = dataType; + obj.itemSize = itemSize; + obj.size = this.int64(); + obj.reader = this._reader.storage(obj.size, obj.itemSize, dataType); + obj.reset = function() { + this.reader.reset(); + }; + obj.read = function() { + switch (dataType) { + case 'uint8': + return this.reader.byte(); + case 'int8': + return 
this.reader.int8(); + case 'int16': + return this.reader.int16(); + case 'int32': + return this.reader.int32(); + case 'int64': + return this.reader.int64(); + case 'float32': + return this.reader.float32(); + case 'float64': + return this.reader.float64(); + } + return null; + }; + } +}; + +torch.BinaryReader = class { + + constructor(buffer) { + this._buffer = buffer; + this._dataView = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength); + this._position = 0; + this._textDecoder = new TextDecoder('ascii'); + } + + reset() { + this._position = 0; + } + + skip(offset) { + this._position += offset; + if (this._position > this._buffer.length) { + throw new torch.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.'); + } + } + + boolean() { + return this.int32() == 1; + } + + bytes(length) { + const position = this._position; + this.skip(length); + return this._buffer.subarray(position, this._position); + } + + int8() { + const position = this._position; + this.skip(1); + return this._dataView.getInt8(position, true); + } + + int16() { + const position = this._position; + this.skip(2); + return this._dataView.getInt16(position, true); + } + + int32() { + const position = this._position; + this.skip(4); + return this._dataView.getInt32(position, true); + } + + int64() { + const position = this._position; + this.skip(8); + const lo = this._dataView.getUint32(position, true); + const hi = this._dataView.getUint32(position + 4, true); + return new long.Long(lo, hi, false).toNumber(); + } + + int64s(size) { + let array = []; + for (let i = 0; i < size; i++) { + array.push(this.int64()); + } + return array; + } + + float32() { + const position = this._position; + this.skip(4); + return this._dataView.getFloat32(position, true); + } + + float64() { + const position = this._position; + this.skip(8); + return this._dataView.getFloat64(position, true); + } + + string() { 
torch.TextReader = class {

    // Reader for the ASCII variant of the Torch7 serialization format.
    // Values are separator-delimited; the default separator is '\n' (0x0a),
    // while storage payloads use ' ' (0x20).
    constructor(buffer, separator) {
        this._buffer = buffer;
        this._position = 0;
        this._dataView = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength);
        this._textDecoder = new TextDecoder('ascii');
        this._separator = separator || 0x0a;
    }

    reset() {
        this._position = 0;
    }

    // Returns the bytes up to the next separator (or the end of the buffer),
    // scanning at most 'size' + 1 bytes before giving up.
    line(size) {
        const start = this._position;
        while (this._position < this._buffer.length && size > -1) {
            const c = this._buffer[this._position++];
            if (c == this._separator) {
                return this._buffer.slice(start, this._position - 1);
            }
            else if (this._position == this._buffer.length) {
                return this._buffer.slice(start, this._position);
            }
            size--;
        }
        throw new torch.Error('Line exceeded maximum length.');
    }

    boolean() {
        return this.int32() == 1;
    }

    bytes(size) {
        return this.line(size);
    }

    // All integer widths share the same textual representation.
    int8() {
        return this.int64();
    }

    int16() {
        return this.int64();
    }

    int32() {
        return this.int64();
    }

    int64() {
        const token = this._textDecoder.decode(this.line(20));
        const number = Number.parseInt(token, 10);
        // 'token - number' is NaN when the token had trailing garbage or
        // did not parse at all.
        if (Number.isNaN(token - number)) {
            throw new torch.Error("Couldn't parse int64 '" + token + "'.");
        }
        return number;
    }

    // Space-separated integer list carried on a single line.
    int64s(size) {
        let array = [];
        if (size > 0) {
            const text = this._textDecoder.decode(this.line(Number.MAX_SAFE_INTEGER));
            for (const token of text.split(' ')) {
                const number = Number.parseInt(token, 10);
                if (Number.isNaN(token - number)) {
                    throw new torch.Error("Couldn't parse int64 '" + token + "'.");
                }
                array.push(number);
            }
        }
        return array;
    }

    float32() {
        return this.float64();
    }

    float64() {
        const token = this._textDecoder.decode(this.line(24));
        // 'nan' and '-nan' both map to NaN; JavaScript does not expose a
        // distinct negative NaN.
        if (token.startsWith('nan') || token.startsWith('-nan')) {
            return NaN;
        }
        if (token.startsWith('inf')) {
            return Infinity;
        }
        if (token.startsWith('-inf')) {
            return -Infinity;
        }
        const number = Number.parseFloat(token);
        if (Number.isNaN(token - number)) {
            throw new torch.Error("Couldn't parse float '" + token + "'.");
        }
        return number;
    }

    // Length-prefixed string.
    string() {
        const size = this.int32();
        if (size == 0) {
            return '';
        }
        const data = this.line(size);
        const text = this._textDecoder.decode(data);
        if (size != text.length) {
            // BUG FIX: the original threw 'torch.Error(...)' without 'new';
            // invoking an ES6 class without 'new' raises a TypeError instead
            // of the intended torch.Error.
            throw new torch.Error('Invalid text length.');
        }
        return text;
    }

    // Returns a nested reader over one storage payload. uint8 storages are
    // raw bytes followed by a line terminator; other types are space-separated
    // text values on a single line.
    storage(size, itemSize, dataType) {
        if (size <= 0) {
            throw new torch.Error("Unsupported storage size '" + size + "'.");
        }
        if (dataType === 'uint8') {
            const start = this._position;
            this._position += size;
            const bytes = this._buffer.slice(start, this._position);
            this.line(0);
            return new torch.BinaryReader(bytes);
        }
        const data = this.line(Number.MAX_SAFE_INTEGER);
        return new torch.TextReader(data, 0x20);
    }
};

if (typeof module !== 'undefined' && typeof module.exports === 'object') {
    module.exports.ModelFactory = torch.ModelFactory;
}
+.node-item-type-control path { + fill: #A8E9B8; +} +.node-item-type-layer path { + fill: #DB989A; + fill-opacity: 0.7; +} +.node-item-type-wrapper path { + fill: #6DCDE4; + fill-opacity: 0.7; +} +.node-item-type-activation path { + fill: #93C2CA; + fill-opacity: 0.7; +} +.node-item-type-pool path { + fill: #DE7CCE; + fill-opacity: 0.7; +} +.node-item-type-normalization path { + fill: #DA96BC; + fill-opacity: 0.7; +} +.node-item-type-dropout path { + fill: #309E51; + fill-opacity: 0.7; +} +.node-item-type-shape path { + fill: #D6C482; + fill-opacity: 0.7; +} +.node-item-type-tensor path { + fill: #6D7CE4; + fill-opacity: 0.7; +} +.node-item-type-transform path { + fill: #CDCB74; +} +.node-item-type-data path { + fill: #2576AD; + fill-opacity: 0.7; +} +.node-item-type-custom path { + fill: #E46D6D; + fill-opacity: 0.7; +} + +.node-item-input path { + fill: #fff; +} +.node-item-input:hover { + cursor: pointer; +} + +.node-item-constant path { + fill: #eee; +} +.node-item-constant:hover { + cursor: pointer; +} + +.node-item-undefined path { + fill: #CA5353; + fill-opacity: 0.7; +} +.node-item-undefined:hover { + cursor: pointer; +} + +.node-attribute:hover { + cursor: pointer; +} +.node-attribute:hover line { + cursor: pointer; +} +.node-attribute text { + font-size: 9px; + font-weight: normal; +} +.node-attribute path { + fill: #fff; + stroke-width: 0; +} + +.graph-item-input path { + fill: #E49D6D; + fill-opacity: 0.7; +} +.graph-item-input:hover { + cursor: pointer; +} + +.graph-item-output path { + fill: #E4E06D; + fill-opacity: 0.9; +} +.graph-item-output:hover { + cursor: pointer; +} + +.edge-label text { + font-size: 10px; +} +.edge-path { + stroke: #666; + stroke-width: 1px; + fill: none; +} +#arrowhead-vee path { + fill: #666; +} +.edge-path-control-dependency { + stroke-dasharray: 3, 2; +} + +.cluster rect { + stroke: #000; + fill: #000; + fill-opacity: 0.02; + stroke-opacity: 0.06; + stroke-width: 1px; +} + +.select .node.border { + stroke: #1527C2; +} 
+.select.edge-path { + stroke: #1527C2; +} diff --git a/frontend/packages/core/public/netron/view-grapher.js b/frontend/packages/core/public/netron/view-grapher.js new file mode 100644 index 00000000..0c3737f9 --- /dev/null +++ b/frontend/packages/core/public/netron/view-grapher.js @@ -0,0 +1,646 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var grapher = grapher || {}; +var dagre = dagre || require('dagre'); + +grapher.Renderer = class { + + constructor(document, svgElement) { + this._document = document; + this._svgElement = svgElement; + } + + render(graph) { + + let svgClusterGroup = this.createElement('g'); + svgClusterGroup.setAttribute('id', 'clusters'); + svgClusterGroup.setAttribute('class', 'clusters'); + this._svgElement.appendChild(svgClusterGroup); + + let svgEdgePathGroup = this.createElement('g'); + svgEdgePathGroup.setAttribute('id', 'edge-paths'); + svgEdgePathGroup.setAttribute('class', 'edge-paths'); + this._svgElement.appendChild(svgEdgePathGroup); + + let svgEdgeLabelGroup = this.createElement('g'); + svgEdgeLabelGroup.setAttribute('id', 'edge-labels'); + svgEdgeLabelGroup.setAttribute('class', 'edge-labels'); + this._svgElement.appendChild(svgEdgeLabelGroup); + + let svgNodeGroup = this.createElement('g'); + svgNodeGroup.setAttribute('id', 'nodes'); + svgNodeGroup.setAttribute('class', 'nodes'); + this._svgElement.appendChild(svgNodeGroup); + + for (const nodeId of graph.nodes()) { + if (graph.children(nodeId).length == 0) { + const node = graph.node(nodeId); + const element = this.createElement('g'); + if (node.id) { + element.setAttribute('id', node.id); + } + element.setAttribute('class', Object.prototype.hasOwnProperty.call(node, 'class') ? 
('node ' + node.class) : 'node'); + element.style.opacity = 0; + const container = this.createElement('g'); + container.appendChild(node.label); + element.appendChild(container); + svgNodeGroup.appendChild(element); + const nodeBox = node.label.getBBox(); + const nodeX = - nodeBox.width / 2; + const nodeY = - nodeBox.height / 2; + container.setAttribute('transform', 'translate(' + nodeX + ',' + nodeY + ')'); + node.width = nodeBox.width; + node.height = nodeBox.height; + node.element = element; + } + } + + for (const edgeId of graph.edges()) { + const edge = graph.edge(edgeId); + if (edge.label) { + let tspan = this.createElement('tspan'); + tspan.setAttribute('xml:space', 'preserve'); + tspan.setAttribute('dy', '1em'); + tspan.setAttribute('x', '1'); + tspan.appendChild(this._document.createTextNode(edge.label)); + const text = this.createElement('text'); + text.appendChild(tspan); + const textContainer = this.createElement('g'); + textContainer.appendChild(text); + const labelElement = this.createElement('g'); + labelElement.style.opacity = 0; + labelElement.setAttribute('class', 'edge-label'); + labelElement.appendChild(textContainer); + svgEdgeLabelGroup.appendChild(labelElement); + const edgeBox = textContainer.getBBox(); + const edgeX = - edgeBox.width / 2; + const edgeY = - edgeBox.height / 2; + textContainer.setAttribute('transform', 'translate(' + edgeX + ',' + edgeY + ')'); + edge.width = edgeBox.width; + edge.height = edgeBox.height; + edge.labelElement = labelElement; + } + } + + dagre.layout(graph); + + for (const nodeId of graph.nodes()) { + if (graph.children(nodeId).length == 0) { + const node = graph.node(nodeId); + node.element.setAttribute('transform', 'translate(' + node.x + ',' + node.y + ')'); + node.element.style.opacity = 1; + delete node.element; + } + } + + for (const edgeId of graph.edges()) { + const edge = graph.edge(edgeId); + if (edge.labelElement) { + edge.labelElement.setAttribute('transform', 'translate(' + edge.x + ',' + edge.y + 
')'); + edge.labelElement.style.opacity = 1; + delete edge.labelElement; + } + } + + const edgePathGroupDefs = this.createElement('defs'); + svgEdgePathGroup.appendChild(edgePathGroupDefs); + const marker = this.createElement('marker'); + marker.setAttribute('id', 'arrowhead-vee'); + marker.setAttribute('viewBox', '0 0 10 10'); + marker.setAttribute('refX', 9); + marker.setAttribute('refY', 5); + marker.setAttribute('markerUnits', 'strokeWidth'); + marker.setAttribute('markerWidth', 8); + marker.setAttribute('markerHeight', 6); + marker.setAttribute('orient', 'auto'); + edgePathGroupDefs.appendChild(marker); + const markerPath = this.createElement('path'); + markerPath.setAttribute('d', 'M 0 0 L 10 5 L 0 10 L 4 5 z'); + markerPath.style.setProperty('stroke-width', 1); + markerPath.style.setProperty('stroke-dasharray', '1,0'); + marker.appendChild(markerPath); + + for (const edgeId of graph.edges()) { + const edge = graph.edge(edgeId); + const edgePath = grapher.Renderer._computeCurvePath(edge, graph.node(edgeId.v), graph.node(edgeId.w)); + const edgeElement = this.createElement('path'); + edgeElement.setAttribute('class', Object.prototype.hasOwnProperty.call(edge, 'class') ? 
('edge-path ' + edge.class) : 'edge-path'); + edgeElement.setAttribute('d', edgePath); + edgeElement.setAttribute('marker-end', 'url(#arrowhead-vee)'); + if (edge.id) { + edgeElement.setAttribute('id', edge.id); + } + svgEdgePathGroup.appendChild(edgeElement); + } + + for (const nodeId of graph.nodes()) { + if (graph.children(nodeId).length > 0) { + const node = graph.node(nodeId); + const nodeElement = this.createElement('g'); + nodeElement.setAttribute('class', 'cluster'); + nodeElement.setAttribute('transform', 'translate(' + node.x + ',' + node.y + ')'); + const rect = this.createElement('rect'); + rect.setAttribute('x', - node.width / 2); + rect.setAttribute('y', - node.height / 2 ); + rect.setAttribute('width', node.width); + rect.setAttribute('height', node.height); + if (node.rx) { + rect.setAttribute('rx', node.rx); + } + if (node.ry) { + rect.setAttribute('ry', node.ry); + } + nodeElement.appendChild(rect); + svgClusterGroup.appendChild(nodeElement); + } + } + } + + createElement(name) { + return this._document.createElementNS('http://www.w3.org/2000/svg', name); + } + + static _computeCurvePath(edge, tail, head) { + let points = edge.points.slice(1, edge.points.length - 1); + points.unshift(grapher.Renderer.intersectRect(tail, points[0])); + points.push(grapher.Renderer.intersectRect(head, points[points.length - 1])); + + const path = new Path(); + const curve = new Curve(path); + for (let i = 0; i < points.length; i++) { + const point = points[i]; + if (i == 0) { + curve.lineStart(); + } + curve.point(point.x, point.y); + if (i == points.length - 1) { + curve.lineEnd(); + } + } + + return path.data; + } + + static intersectRect(node, point) { + const x = node.x; + const y = node.y; + const dx = point.x - x; + const dy = point.y - y; + let w = node.width / 2; + let h = node.height / 2; + let sx; + let sy; + if (Math.abs(dy) * w > Math.abs(dx) * h) { + if (dy < 0) { + h = -h; + } + sx = dy === 0 ? 
0 : h * dx / dy; + sy = h; + } + else { + if (dx < 0) { + w = -w; + } + sx = w; + sy = dx === 0 ? 0 : w * dy / dx; + } + return {x: x + sx, y: y + sy}; + } +}; + +grapher.NodeElement = class { + + constructor(document) { + this._document = document; + this._blocks = []; + } + + block(type) { + this._block = null; + switch (type) { + case 'header': + this._block = new grapher.NodeElement.Header(this._document); + break; + case 'list': + this._block = new grapher.NodeElement.List(this._document); + break; + } + this._blocks.push(this._block); + return this._block; + } + + format(contextElement) { + let rootElement = this.createElement('g'); + contextElement.appendChild(rootElement); + + let width = 0; + let height = 0; + let tops = []; + + for (const block of this._blocks) { + tops.push(height); + block.layout(rootElement); + if (width < block.width) { + width = block.width; + } + height = height + block.height; + } + + for (let i = 0; i < this._blocks.length; i++) { + let top = tops.shift(); + this._blocks[i].update(rootElement, top, width, i == 0, i == this._blocks.length - 1); + } + + let borderElement = this.createElement('path'); + borderElement.setAttribute('class', [ 'node', 'border' ].join(' ')); + borderElement.setAttribute('d', grapher.NodeElement.roundedRect(0, 0, width, height, true, true, true, true)); + rootElement.appendChild(borderElement); + + contextElement.innerHTML = ''; + return rootElement; + } + + static roundedRect(x, y, width, height, r1, r2, r3, r4) { + const radius = 5; + r1 = r1 ? radius : 0; + r2 = r2 ? radius : 0; + r3 = r3 ? radius : 0; + r4 = r4 ? 
radius : 0; + return "M" + (x + r1) + "," + y + + "h" + (width - r1 - r2) + + "a" + r2 + "," + r2 + " 0 0 1 " + r2 + "," + r2 + + "v" + (height - r2 - r3) + + "a" + r3 + "," + r3 + " 0 0 1 " + -r3 + "," + r3 + + "h" + (r3 + r4 - width) + + "a" + r4 + "," + r4 + " 0 0 1 " + -r4 + "," + -r4 + + 'v' + (-height + r4 + r1) + + "a" + r1 + "," + r1 + " 0 0 1 " + r1 + "," + -r1 + + "z"; + } + + createElement(name) { + return this._document.createElementNS('http://www.w3.org/2000/svg', name); + } +}; + +grapher.NodeElement.Header = class { + + constructor(document) { + this._document = document; + this._items = []; + } + + add(id, classList, content, tooltip, handler) { + this._items.push({ + id: id, + classList: classList, + content: content, + tooltip: tooltip, + handler: handler + }); + } + + layout(parentElement) { + this._width = 0; + this._height = 0; + this._elements = []; + let x = 0; + let y = 0; + for (const item of this._items) { + let yPadding = 4; + let xPadding = 7; + let element = this.createElement('g'); + let classList = [ 'node-item' ]; + parentElement.appendChild(element); + let pathElement = this.createElement('path'); + let textElement = this.createElement('text'); + element.appendChild(pathElement); + element.appendChild(textElement); + if (item.classList) { + classList = classList.concat(item.classList); + } + element.setAttribute('class', classList.join(' ')); + if (item.id) { + element.setAttribute('id', item.id); + } + if (item.handler) { + element.addEventListener('click', item.handler); + } + if (item.tooltip) { + let titleElement = this.createElement('title'); + titleElement.textContent = item.tooltip; + element.appendChild(titleElement); + } + if (item.content) { + textElement.textContent = item.content; + } + let boundingBox = textElement.getBBox(); + let width = boundingBox.width + xPadding + xPadding; + let height = boundingBox.height + yPadding + yPadding; + this._elements.push({ + 'group': element, + 'text': textElement, + 'path': 
pathElement, + 'x': x, 'y': y, + 'width': width, 'height': height, + 'tx': xPadding, 'ty': yPadding - boundingBox.y, + }); + x += width; + if (this._height < height) { + this._height = height; + } + if (x > this._width) { + this._width = x; + } + } + } + + get width() { + return this._width; + } + + get height() { + return this._height; + } + + update(parentElement, top, width, first, last) { + + let dx = width - this._width; + let i; + let element; + + for (i = 0; i < this._elements.length; i++) { + element = this._elements[i]; + if (i == 0) { + element.width = element.width + dx; + } + else { + element.x = element.x + dx; + element.tx = element.tx + dx; + } + element.y = element.y + top; + } + + for (i = 0; i < this._elements.length; i++) { + element = this._elements[i]; + element.group.setAttribute('transform', 'translate(' + element.x + ',' + element.y + ')'); + let r1 = i == 0 && first; + let r2 = i == this._elements.length - 1 && first; + let r3 = i == this._elements.length - 1 && last; + let r4 = i == 0 && last; + element.path.setAttribute('d', grapher.NodeElement.roundedRect(0, 0, element.width, element.height, r1, r2, r3, r4)); + element.text.setAttribute('x', 6); + element.text.setAttribute('y', element.ty); + } + + let lineElement; + for (i = 0; i < this._elements.length; i++) { + element = this._elements[i]; + if (i != 0) { + lineElement = this.createElement('line'); + lineElement.setAttribute('class', 'node'); + lineElement.setAttribute('x1', element.x); + lineElement.setAttribute('x2', element.x); + lineElement.setAttribute('y1', top); + lineElement.setAttribute('y2', top + this._height); + parentElement.appendChild(lineElement); + } + } + + if (!first) { + lineElement = this.createElement('line'); + lineElement.setAttribute('class', 'node'); + lineElement.setAttribute('x1', 0); + lineElement.setAttribute('x2', width); + lineElement.setAttribute('y1', top); + lineElement.setAttribute('y2', top); + parentElement.appendChild(lineElement); + } + } + + 
createElement(name) { + return this._document.createElementNS('http://www.w3.org/2000/svg', name); + } +}; + +grapher.NodeElement.List = class { + + constructor(document) { + this._document = document; + this._items = []; + } + + add(id, name, value, tooltip, separator) { + this._items.push({ id: id, name: name, value: value, tooltip: tooltip, separator: separator }); + } + + get handler() { + return this._handler; + } + + set handler(handler) { + this._handler = handler; + } + + layout(parentElement) { + this._width = 0; + this._height = 0; + let x = 0; + let y = 0; + this._element = this.createElement('g'); + this._element.setAttribute('class', 'node-attribute'); + parentElement.appendChild(this._element); + if (this._handler) { + this._element.addEventListener('click', this._handler); + } + this._backgroundElement = this.createElement('path'); + this._element.appendChild(this._backgroundElement); + this._element.setAttribute('transform', 'translate(' + x + ',' + y + ')'); + this._height += 3; + for (const item of this._items) { + const yPadding = 1; + const xPadding = 6; + let textElement = this.createElement('text'); + if (item.id) { + textElement.setAttribute('id', item.id); + } + textElement.setAttribute('xml:space', 'preserve'); + this._element.appendChild(textElement); + if (item.tooltip) { + let titleElement = this.createElement('title'); + titleElement.textContent = item.tooltip; + textElement.appendChild(titleElement); + } + let textNameElement = this.createElement('tspan'); + textNameElement.textContent = item.name; + if (item.separator.trim() != '=') { + textNameElement.style.fontWeight = 'bold'; + } + textElement.appendChild(textNameElement); + let textValueElement = this.createElement('tspan'); + textValueElement.textContent = item.separator + item.value; + textElement.appendChild(textValueElement); + const size = textElement.getBBox(); + const width = xPadding + size.width + xPadding; + if (this._width < width) { + this._width = width; + } + 
textElement.setAttribute('x', x + xPadding); + textElement.setAttribute('y', this._height + yPadding - size.y); + this._height += yPadding + size.height + yPadding; + } + this._height += 3; + + if (this._width < 100) { + this._width = 100; + } + } + + get width() { + return this._width; + } + + get height() { + return this._height; + } + + update(parentElement, top, width , first, last) { + + this._element.setAttribute('transform', 'translate(0,' + top + ')'); + + let r1 = first; + let r2 = first; + let r3 = last; + let r4 = last; + this._backgroundElement.setAttribute('d', grapher.NodeElement.roundedRect(0, 0, width, this._height, r1, r2, r3, r4)); + + if (!first) { + let lineElement = this.createElement('line'); + lineElement.setAttribute('class', 'node'); + lineElement.setAttribute('x1', 0); + lineElement.setAttribute('x2', width); + lineElement.setAttribute('y1', 0); + lineElement.setAttribute('y2', 0); + this._element.appendChild(lineElement); + } + } + + createElement(name) { + return this._document.createElementNS('http://www.w3.org/2000/svg', name); + } +}; + + +class Path { + + constructor() { + this._x0 = null; + this._y0 = null; + this._x1 = null; + this._y1 = null; + this._data = ''; + } + + moveTo(x, y) { + this._data += "M" + (this._x0 = this._x1 = +x) + "," + (this._y0 = this._y1 = +y); + } + + lineTo(x, y) { + this._data += "L" + (this._x1 = +x) + "," + (this._y1 = +y); + } + + bezierCurveTo(x1, y1, x2, y2, x, y) { + this._data += "C" + (+x1) + "," + (+y1) + "," + (+x2) + "," + (+y2) + "," + (this._x1 = +x) + "," + (this._y1 = +y); + } + + closePath() { + if (this._x1 !== null) { + this._x1 = this._x0; + this._y1 = this._y0; + this._data += "Z"; + } + } + + get data() { + return this._data; + } +} + +class Curve { + + constructor(context) { + this._context = context; + } + + lineStart() { + this._x0 = NaN; + this._x1 = NaN; + this._y0 = NaN; + this._y1 = NaN; + this._point = 0; + } + + lineEnd() { + switch (this._point) { + case 3: + 
this.curve(this._x1, this._y1); + this._context.lineTo(this._x1, this._y1); + break; + case 2: + this._context.lineTo(this._x1, this._y1); + break; + } + if (this._line || (this._line !== 0 && this._point === 1)) { + this._context.closePath(); + } + this._line = 1 - this._line; + } + + point(x, y) { + x = +x; + y = +y; + switch (this._point) { + case 0: + this._point = 1; + if (this._line) { + this._context.lineTo(x, y); + } + else { + this._context.moveTo(x, y); + } + break; + case 1: + this._point = 2; + break; + case 2: + this._point = 3; + this._context.lineTo((5 * this._x0 + this._x1) / 6, (5 * this._y0 + this._y1) / 6); + this.curve(x, y); + break; + default: + this.curve(x, y); + break; + } + this._x0 = this._x1; + this._x1 = x; + this._y0 = this._y1; + this._y1 = y; + } + + curve(x, y) { + this._context.bezierCurveTo( + (2 * this._x0 + this._x1) / 3, + (2 * this._y0 + this._y1) / 3, + (this._x0 + 2 * this._x1) / 3, + (this._y0 + 2 * this._y1) / 3, + (this._x0 + 4 * this._x1 + x) / 6, + (this._y0 + 4 * this._y1 + y) / 6 + ); + } +} + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Renderer = grapher.Renderer; + module.exports.NodeElement = grapher.NodeElement; +} \ No newline at end of file diff --git a/frontend/packages/core/public/netron/view-sidebar.js b/frontend/packages/core/public/netron/view-sidebar.js new file mode 100644 index 00000000..b6e68315 --- /dev/null +++ b/frontend/packages/core/public/netron/view-sidebar.js @@ -0,0 +1,800 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var sidebar = sidebar || {}; +var long = long || {Long: require('long')}; +var marked = marked || require('marked'); + +sidebar.NodeSidebar = class { + constructor(host, node) { + this._host = host; + this._node = node; + this._properties = []; + this._attributes = []; + this._inputs = []; + this._outputs = []; + + if (node.type) { + this._addProperty( + 'type', + new 
sidebar.ValueTextView(this._host, node.type, {documentation: !!node.metadata}) + ); + } + + if (node.name) { + this._addProperty('name', new sidebar.ValueTextView(this._host, node.name)); + } + + if (node.location) { + this._addProperty('location', new sidebar.ValueTextView(this._host, node.location)); + } + + if (node.domain) { + this._addProperty('domain', new sidebar.ValueTextView(this._host, node.domain)); + } + + if (node.description) { + this._addProperty('description', new sidebar.ValueTextView(this._host, node.description)); + } + + if (node.device) { + this._addProperty('device', new sidebar.ValueTextView(this._host, node.device)); + } + + const attributes = node.attributes; + if (attributes && attributes.length > 0) { + let sortedAttributes = node.attributes.slice(); + sortedAttributes.sort((a, b) => { + const au = a.name.toUpperCase(); + const bu = b.name.toUpperCase(); + return au < bu ? -1 : au > bu ? 1 : 0; + }); + for (const attribute of sortedAttributes) { + this._addAttribute(attribute.name, attribute); + } + } + + const inputs = node.inputs; + if (inputs && inputs.length > 0) { + for (const input of inputs) { + this._addInput(input.name, input); + } + } + + const outputs = node.outputs; + if (outputs && outputs.length > 0) { + for (const output of outputs) { + this._addOutput(output.name, output); + } + } + } + + render() { + return { + properties: this._properties, + groups: [ + { + name: 'attributes', + properties: this._attributes + }, + { + name: 'inputs', + properties: this._inputs + }, + { + name: 'outputs', + properties: this._outputs + } + ] + }; + } + + _addProperty(name, value) { + let item = new sidebar.NameValueView(this._host, name, value); + this._properties.push(item.render()); + } + + _addAttribute(name, attribute) { + const item = new sidebar.NameValueView(this._host, name, new NodeAttributeView(this._host, attribute)); + this._attributes.push(item.render()); + } + + _addInput(name, input) { + if (input.arguments.length > 0) { + 
const view = new sidebar.ParameterView(this._host, input); + const item = new sidebar.NameValueView(this._host, name, view); + this._inputs.push(item.render()); + } + } + + _addOutput(name, output) { + if (output.arguments.length > 0) { + const view = new sidebar.ParameterView(this._host, output); + const item = new sidebar.NameValueView(this._host, name, view); + this._outputs.push(item.render()); + } + } + + static formatAttributeValue(value, type, quote) { + if (typeof value === 'function') { + return value(); + } + if (value && long.Long.isLong(value)) { + return value.toString(); + } + if (value && long.Long.isLong(value)) { + return value.toString(); + } + if (Number.isNaN(value)) { + return 'NaN'; + } + switch (type) { + case 'shape': + return value.toString(); + case 'shape[]': + return value ? value.map(item => item.toString()).join(', ') : '(null)'; + case 'graph': + return value.toString(); + case 'graph[]': + return value ? value.map(item => item.toString()).join(', ') : '(null)'; + case 'tensor': + if ( + value && + value.type && + value.type.shape && + value.type.shape.dimensions && + value.type.shape.dimensions.length == 0 + ) { + return value.toString(); + } + return '[...]'; + } + if (typeof value === 'string' && (!type || type != 'string')) { + return quote ? '"' + value + '"' : value; + } + if (Array.isArray(value)) { + if (value.length == 0) { + return quote ? '[]' : ''; + } + let ellipsis = false; + if (value.length > 1000) { + value = value.slice(0, 1000); + ellipsis = true; + } + let array = value.map(item => { + if (item && long.Long.isLong(item)) { + return item.toString(); + } + if (Number.isNaN(item)) { + return 'NaN'; + } + return sidebar.NodeSidebar.formatAttributeValue(item, null, true); + }); + if (ellipsis) { + array.push('\u2026'); + } + return quote ? ['[', array.join(', '), ']'].join(' ') : array.join(', '); + } + if (value === null) { + return quote ? 
'null' : ''; + } + if (value === undefined) { + return 'undefined'; + } + if (value !== Object(value)) { + return value.toString(); + } + let list = []; + const keys = Object.keys(value).filter(key => !key.startsWith('__') && !key.endsWith('__')); + if (keys.length == 1) { + list.push(sidebar.NodeSidebar.formatAttributeValue(value[Object.keys(value)[0]], null, true)); + } else { + for (const key of keys) { + list.push(key + ': ' + sidebar.NodeSidebar.formatAttributeValue(value[key], null, true)); + } + } + let objectType = value.__type__; + if (!objectType && value.constructor.name && value.constructor.name && value.constructor.name !== 'Object') { + objectType = value.constructor.name; + } + if (objectType) { + return objectType + (list.length == 0 ? '()' : ['(', list.join(', '), ')'].join('')); + } + switch (list.length) { + case 0: + return quote ? '()' : ''; + case 1: + return list[0]; + default: + return quote ? ['(', list.join(', '), ')'].join(' ') : list.join(', '); + } + } +}; + +sidebar.NameValueView = class { + constructor(host, name, value) { + this._host = host; + this._name = name; + this._value = value; + + this._element = {name, values: value.render()}; + } + + get name() { + return this._name; + } + + render() { + return this._element; + } +}; + +// sidebar.SelectView = class { +// constructor(host, values, selected) { +// this._host = host; +// this._elements = []; + +// const selectElement = this._host.document.createElement('select'); +// selectElement.setAttribute('class', 'sidebar-view-item-select'); +// selectElement.addEventListener('change', e => { +// this._raise('change', e.target.value); +// }); +// this._elements.push(selectElement); + +// for (const value of values) { +// const optionElement = this._host.document.createElement('option'); +// optionElement.innerText = value; +// if (value == selected) { +// optionElement.setAttribute('selected', 'selected'); +// } +// selectElement.appendChild(optionElement); +// } +// } + +// render() { 
+// return this._elements; +// } + +// on(event, callback) { +// this._events = this._events || {}; +// this._events[event] = this._events[event] || []; +// this._events[event].push(callback); +// } + +// _raise(event, data) { +// if (this._events && this._events[event]) { +// for (const callback of this._events[event]) { +// callback(this, data); +// } +// } +// } +// }; + +sidebar.ValueTextView = class { + constructor(host, value, action) { + this._host = host; + this._elements = []; + this._elements.push(Object.assign({value}, action)); + } + + render() { + return this._elements; + } +}; + +class NodeAttributeView { + constructor(host, attribute) { + this._host = host; + this._attribute = attribute; + this._element = {}; + + if (attribute.type) { + this._element.children = this.renderChildren(); + } + let value = sidebar.NodeSidebar.formatAttributeValue(this._attribute.value, this._attribute.type); + if (value && value.length > 1000) { + value = value.substring(0, 1000) + '\u2026'; + } + this._element.value = value ? 
value : ' '; + } + + render() { + return [this._element]; + } + + renderChildren() { + const children = []; + const typeLine = this._host.document.createElement('div'); + typeLine.className = 'sidebar-view-item-value-line-border'; + const type = this._attribute.type; + const value = this._attribute.value; + if (type == 'tensor' && value && value.type) { + children.push({ + name: 'type', + value: value.type.toString(), + type: 'code' + }); + } else { + children.push({ + name: 'type', + value: this._attribute.type, + type: 'code' + }); + } + + const description = this._attribute.description; + if (description) { + children.push({ + value: description + }); + } + + if (this._attribute.type == 'tensor' && value) { + const state = value.state; + children.push({ + value: state || value.toString(), + type: 'raw' + }); + } + + return children; + } +} + +sidebar.ParameterView = class { + constructor(host, list) { + this._list = list; + this._elements = []; + this._items = []; + for (const argument of list.arguments) { + const item = new sidebar.ArgumentView(host, argument); + this._items.push(item); + this._elements.push(item.render()); + } + } + + render() { + return this._elements; + } +}; + +sidebar.ArgumentView = class { + constructor(host, argument) { + this._host = host; + this._argument = argument; + + this._element = {}; + + const initializer = argument.initializer; + const quantization = argument.quantization; + const type = argument.type; + if (type || initializer || quantization) { + this._element.children = this.renderChildren(); + } + + let name = this._argument.name || ''; + this._hasId = name ? 
true : false; + if (initializer && !this._hasId) { + this._element.name = 'kind'; + this._element.value = initializer.kind; + } else { + if (typeof name !== 'string') { + throw new Error("Invalid argument identifier '" + JSON.stringify(name) + "'."); + } + name = name.split('\n').shift(); // custom argument id + this._element.name = 'name'; + this._element.value = name || ' '; + } + } + + render() { + return this._element; + } + + renderChildren() { + const children = []; + + let type = '?'; + let denotation = null; + if (this._argument.type) { + type = this._argument.type.toString(); + denotation = this._argument.type.denotation || null; + } + + if (type) { + children.push({ + name: 'type', + value: type, + type: 'code' + }); + } + if (denotation) { + children.push({ + name: 'denotation', + value: denotation, + type: 'code' + }); + } + + const description = this._argument.description; + if (description) { + children.push({ + name: 'description', + value: description + }); + } + + const quantization = this._argument.quantization; + if (quantization) { + children.push({ + name: 'quantization', + value: quantization + }); + } + + if (this._argument.location) { + children.push({ + name: 'location', + value: this._argument.location + }); + } + + const initializer = this._argument.initializer; + if (initializer) { + const reference = initializer.reference; + if (reference) { + children.push({ + name: 'reference', + value: this._argument.reference + }); + } + const state = initializer.state; + + // TODO: export tensor + // if ( + // state === null && + // this._host.save && + // initializer.type.dataType && + // initializer.type.dataType != '?' 
&& + // initializer.type.shape && + // initializer.type.shape.dimensions && + // initializer.type.shape.dimensions.length > 0 + // ) { + // this._saveButton = this._host.document.createElement('div'); + // this._saveButton.className = 'sidebar-view-item-value-expander'; + // this._saveButton.innerHTML = '💾'; + // this._saveButton.addEventListener('click', () => { + // this._raise('export-tensor', initializer); + // }); + // this._element.appendChild(this._saveButton); + // } + + let content = ''; + try { + content = state || initializer.toString(); + } catch (err) { + content = err.toString(); + this._host.exception(err, false); + } + children.push({ + value: content, + type: 'raw' + }); + } + + return children; + } +}; + +sidebar.ModelSidebar = class { + constructor(host, model, graph) { + this._host = host; + this._model = model; + this._properties = []; + this._groups = []; + + if (this._model.format) { + this._addProperty('format', new sidebar.ValueTextView(this._host, this._model.format)); + } + if (this._model.producer) { + this._addProperty('producer', new sidebar.ValueTextView(this._host, this._model.producer)); + } + if (this._model.source) { + this._addProperty('source', new sidebar.ValueTextView(this._host, this._model.source)); + } + if (this._model.name) { + this._addProperty('name', new sidebar.ValueTextView(this._host, this._model.name)); + } + if (this._model.version) { + this._addProperty('version', new sidebar.ValueTextView(this._host, this._model.version)); + } + if (this._model.description) { + this._addProperty('description', new sidebar.ValueTextView(this._host, this._model.description)); + } + if (this._model.author) { + this._addProperty('author', new sidebar.ValueTextView(this._host, this._model.author)); + } + if (this._model.company) { + this._addProperty('company', new sidebar.ValueTextView(this._host, this._model.company)); + } + if (this._model.license) { + this._addProperty('license', new sidebar.ValueTextView(this._host, 
this._model.license)); + } + if (this._model.domain) { + this._addProperty('domain', new sidebar.ValueTextView(this._host, this._model.domain)); + } + if (this._model.imports) { + this._addProperty('imports', new sidebar.ValueTextView(this._host, this._model.imports)); + } + if (this._model.runtime) { + this._addProperty('runtime', new sidebar.ValueTextView(this._host, this._model.runtime)); + } + + let metadata = this._model.metadata; + if (metadata) { + for (const property of this._model.metadata) { + this._addProperty(property.name, new sidebar.ValueTextView(this._host, property.value)); + } + } + + // TODO: graph select + // if (this._model._graphs.length > 1) { + // let graphSelector = new sidebar.SelectView( + // this._host, + // this._model.graphs.map(g => g.name), + // graph.name + // ); + // graphSelector.on('change', (sender, data) => { + // this._raise('update-active-graph', data); + // }); + // this._addProperty('subgraph', graphSelector); + // } + + if (graph) { + if (graph.version) { + this._addProperty('version', new sidebar.ValueTextView(this._host, graph.version)); + } + if (graph.type) { + this._addProperty('type', new sidebar.ValueTextView(this._host, graph.type)); + } + if (graph.tags) { + this._addProperty('tags', new sidebar.ValueTextView(this._host, graph.tags)); + } + if (graph.description) { + this._addProperty('description', new sidebar.ValueTextView(this._host, graph.description)); + } + + if (graph.inputs.length) { + for (const input of graph.inputs) { + this._addGroupProperty('inputs', input.name, input); + } + } + + if (graph.outputs.length) { + for (const output of graph.outputs) { + this._addGroupProperty('outputs', output.name, output); + } + } + } + } + + render() { + return { + properties: this._properties, + groups: this._groups + }; + } + + _addGroupProperty(group, name, argument) { + const exist = this._groups.find(g => g.name === group); + if (!exist) { + this._groups.push({ + name: group, + properties: 
[this._addArgument(name, argument)] + }); + } else { + exist.properties.push(this._addArgument(name, argument)); + } + } + + _addProperty(name, value) { + let item = new sidebar.NameValueView(this._host, name, value); + this._properties.push(item.render()); + } + + _addArgument(name, argument) { + const view = new sidebar.ParameterView(this._host, argument); + const item = new sidebar.NameValueView(this._host, name, view); + return item.render(); + } +}; + +sidebar.DocumentationSidebar = class { + constructor(host, metadata) { + this._host = host; + this._metadata = metadata; + } + + render() { + return sidebar.DocumentationSidebar.formatDocumentation(this._metadata); + } + + static formatDocumentation(data) { + if (data) { + data = JSON.parse(JSON.stringify(data)); + if (data.summary) { + data.summary = marked(data.summary); + } + if (data.description) { + data.description = marked(data.description); + } + if (data.attributes) { + for (const attribute of data.attributes) { + if (attribute.description) { + attribute.description = marked(attribute.description); + } + } + } + if (data.inputs) { + for (const input of data.inputs) { + if (input.description) { + input.description = marked(input.description); + } + } + } + if (data.outputs) { + for (const output of data.outputs) { + if (output.description) { + output.description = marked(output.description); + } + } + } + if (data.references) { + for (const reference of data.references) { + if (reference) { + reference.description = marked(reference.description); + } + } + } + return data; + } + return ''; + } +}; + +sidebar.FindSidebar = class { + constructor(host, graphElement, graph) { + this._host = host; + this._graphElement = graphElement; + this._graph = graph; + } + + static selection(item, graphElement) { + const selection = []; + const id = item.id; + + const nodesElement = graphElement.getElementById('nodes'); + let nodeElement = nodesElement.firstChild; + while (nodeElement) { + if (nodeElement.id == id) { + 
selection.push(nodeElement); + } + nodeElement = nodeElement.nextSibling; + } + + const edgePathsElement = graphElement.getElementById('edge-paths'); + let edgePathElement = edgePathsElement.firstChild; + while (edgePathElement) { + if (edgePathElement.id == id) { + selection.push(edgePathElement); + } + edgePathElement = edgePathElement.nextSibling; + } + + let initializerElement = graphElement.getElementById(id); + if (initializerElement) { + while (initializerElement.parentElement) { + initializerElement = initializerElement.parentElement; + if (initializerElement.id && initializerElement.id.startsWith('node-')) { + selection.push(initializerElement); + break; + } + } + } + + if (selection.length > 0) { + return selection; + } + + return null; + } + + update(searchText) { + const text = searchText.toLowerCase(); + + const nodeMatches = new Set(); + const edgeMatches = new Set(); + + const result = []; + + for (const node of this._graph.nodes) { + const initializers = []; + + for (const input of node.inputs) { + for (const argument of input.arguments) { + if ( + argument.name && + argument.name.toLowerCase().indexOf(text) != -1 && + !edgeMatches.has(argument.name) + ) { + if (!argument.initializer) { + result.push({ + type: 'input', + name: argument.name.split('\n').shift(), // custom argument id + id: 'edge-' + argument.name + }); + edgeMatches.add(argument.name); + } else { + initializers.push(argument.initializer); + } + } + } + } + + const name = node.name; + const operator = node.type; + if ( + !nodeMatches.has(name) && + name && + (name.toLowerCase().indexOf(text) != -1 || (operator && operator.toLowerCase().indexOf(text) != -1)) + ) { + result.push({ + type: 'node', + name: node.name, + id: 'node-' + node.name + }); + nodeMatches.add(node.name); + } + + for (const initializer of initializers) { + result.push({ + type: 'initializer', + name: initializer.name, + id: 'initializer-' + initializer.name + }); + } + } + + for (const node of this._graph.nodes) { + 
for (const output of node.outputs) { + for (const argument of output.arguments) { + if ( + argument.name && + argument.name.toLowerCase().indexOf(text) != -1 && + !edgeMatches.has(argument.name) + ) { + result.push({ + type: 'output', + name: argument.name.split('\n').shift(), // custom argument id + id: 'edge-' + argument.name + }); + edgeMatches.add(argument.name); + } + } + } + } + + return { + text: searchText, + result: result + }; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Sidebar = sidebar.Sidebar; + module.exports.ModelSidebar = sidebar.ModelSidebar; + module.exports.NodeSidebar = sidebar.NodeSidebar; + module.exports.DocumentationSidebar = sidebar.DocumentationSidebar; + module.exports.FindSidebar = sidebar.FindSidebar; +} diff --git a/frontend/packages/core/public/netron/view.js b/frontend/packages/core/public/netron/view.js new file mode 100644 index 00000000..373e2e63 --- /dev/null +++ b/frontend/packages/core/public/netron/view.js @@ -0,0 +1,1326 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ + +var view = view || {}; + +var zip = zip || require('./zip'); +var gzip = gzip || require('./gzip'); +var tar = tar || require('./tar'); +var protobuf = protobuf || require('protobufjs'); +var prototxt = prototxt || require('protobufjs/ext/prototxt'); + +var d3 = d3 || require('d3'); +var dagre = dagre || require('dagre'); + +var grapher = grapher || require('./view-grapher'); + +view.View = class { + constructor(host) { + this._host = host; + this._host + .initialize(this) + .then(() => { + this._model = null; + this._selection = []; + this._host.start(); + this._showAttributes = false; + this._showInitializers = true; + this._showNames = false; + this._modelFactoryService = new view.ModelFactoryService(this._host); + }) + .catch(err => { + this.error(err.message, err); + }); + } + + cut() { + this._host.document.execCommand('cut'); + } + + copy() { + 
this._host.document.execCommand('copy'); + } + + paste() { + this._host.document.execCommand('paste'); + } + + selectAll() { + this._host.document.execCommand('selectall'); + } + + find(value) { + if (this._activeGraph) { + this.clearSelection(); + const graphElement = document.getElementById('canvas'); + const view = new sidebar.FindSidebar(this._host, graphElement, this._activeGraph); + this._host.message('search', view.update(value)); + } + } + + toggleAttributes(toggle) { + this._showAttributes = toggle == null ? !this._showAttributes : toggle; + this._reload(); + } + + get showAttributes() { + return this._showAttributes; + } + + toggleInitializers(toggle) { + this._showInitializers = toggle == null ? !this._showInitializers : toggle; + this._reload(); + } + + get showInitializers() { + return this._showInitializers; + } + + toggleNames(toggle) { + this._showNames = toggle == null ? !this._showNames : toggle; + this._reload(); + } + + get showNames() { + return this._showNames; + } + + _reload() { + this._host.status('loading'); + if (this._model && this._activeGraph) { + this._updateGraph(this._model, this._activeGraph).catch(error => { + if (error) { + this.error('Graph update failed.', error); + } + }); + } + } + + _timeout(time) { + return new Promise(resolve => { + setTimeout(() => { + resolve(); + }, time); + }); + } + + zoomIn() { + if (this._zoom) { + this._zoom.scaleBy(d3.select(this._host.document.getElementById('canvas')), 1.2); + } + } + + zoomOut() { + if (this._zoom) { + this._zoom.scaleBy(d3.select(this._host.document.getElementById('canvas')), 0.8); + } + } + + resetZoom() { + if (this._zoom) { + this._zoom.scaleTo(d3.select(this._host.document.getElementById('canvas')), 1); + } + } + + select(item) { + this.clearSelection(); + const graphElement = document.getElementById('canvas'); + const selection = sidebar.FindSidebar.selection(item, graphElement); + if (selection && selection.length > 0) { + const graphElement = 
this._host.document.getElementById('canvas'); + const graphRect = graphElement.getBoundingClientRect(); + let x = 0; + let y = 0; + for (const element of selection) { + element.classList.add('select'); + this._selection.push(element); + const transform = element.transform.baseVal.consolidate(); + const box = element.getBBox(); + const ex = transform ? transform.matrix.e : box.x + box.width / 2; + const ey = transform ? transform.matrix.f : box.y + box.height / 2; + x += ex; + y += ey; + } + x = x / selection.length; + y = y / selection.length; + this._zoom.transform( + d3.select(graphElement), + d3.zoomIdentity.translate(graphRect.width / 2 - x, graphRect.height / 2 - y) + ); + } + } + + clearSelection() { + while (this._selection.length > 0) { + const element = this._selection.pop(); + element.classList.remove('select'); + } + } + + error(message, err) { + this._host.error(message, err.toString()); + } + + accept(file) { + return this._modelFactoryService.accept(file); + } + + open(context) { + return this._timeout(2).then(() => { + return this._modelFactoryService.open(context).then(model => { + return this._timeout(20).then(() => { + const graph = model.graphs.length > 0 ? model.graphs[0] : null; + return this._updateGraph(model, graph); + }); + }); + }); + } + + _updateActiveGraph(name) { + if (this._model) { + const model = this._model; + const graph = model.graphs.filter(graph => name == graph.name).shift(); + if (graph) { + this._host.status('loading'); + this._timeout(200).then(() => { + return this._updateGraph(model, graph).catch(error => { + if (error) { + this.error('Graph update failed.', error); + } + }); + }); + } + } + } + + _updateGraph(model, graph) { + return this._timeout(100).then(() => { + if (graph && graph != this._activeGraph) { + const nodes = graph.nodes; + if (nodes.length > 1400) { + if ( + !this._host.confirm( + 'Large model detected.', + 'This graph contains a large number of nodes and might take a long time to render. 
Do you want to continue?' + ) + ) { + return null; + } + } + } + return this.renderGraph(model, graph) + .then(() => { + this._model = model; + this._activeGraph = graph; + this._host.status('rendered'); + return this._model; + }) + .catch(error => { + return this.renderGraph(this._model, this._activeGraph) + .then(() => { + this._host.status('rendered'); + throw error; + }) + .catch(() => { + throw error; + }); + }); + }); + } + + renderGraph(model, graph) { + try { + const graphElement = this._host.document.getElementById('canvas'); + while (graphElement.lastChild) { + graphElement.removeChild(graphElement.lastChild); + } + if (!graph) { + return Promise.resolve(); + } else { + this._zoom = null; + graphElement.style.position = 'absolute'; + graphElement.style.margin = '0'; + + const groups = graph.groups; + + const graphOptions = {}; + graphOptions.nodesep = 25; + graphOptions.ranksep = 20; + + const g = new dagre.graphlib.Graph({compound: groups}); + g.setGraph(graphOptions); + g.setDefaultEdgeLabel(() => { + return {}; + }); + + let nodeId = 0; + const edgeMap = {}; + const clusterMap = {}; + const clusterParentMap = {}; + let id = new Date().getTime(); + const nodes = graph.nodes; + + if (nodes.length > 1500) { + graphOptions.ranker = 'longest-path'; + } + + if (groups) { + for (const node of nodes) { + if (node.group) { + const path = node.group.split('/'); + while (path.length > 0) { + const name = path.join('/'); + path.pop(); + clusterParentMap[name] = path.join('/'); + } + } + } + } + + for (const node of nodes) { + const element = new grapher.NodeElement(this._host.document); + + const addNode = (element, node, edges) => { + const header = element.block('header'); + const styles = ['node-item-type']; + const metadata = node.metadata; + const category = metadata && metadata.category ? 
metadata.category : ''; + if (category) { + styles.push('node-item-type-' + category.toLowerCase()); + } + const type = node.type; + if (typeof type !== 'string' || !type.split) { + // #416 + throw new ModelError( + "Unknown node type '" + JSON.stringify(type) + "' in '" + model.format + "'." + ); + } + const content = this.showNames && node.name ? node.name : type.split('.').pop(); + const tooltip = this.showNames && node.name ? type : node.name; + header.add(null, styles, content, tooltip, () => { + this.showNodeProperties(node); + }); + + if (node.function) { + header.add(null, ['node-item-function'], '+', null, () => { + // debugger; + }); + } + + const initializers = []; + let hiddenInitializers = false; + if (this._showInitializers) { + for (const input of node.inputs) { + if ( + input.visible && + input.arguments.length == 1 && + input.arguments[0].initializer != null + ) { + initializers.push(input); + } + if ( + (!input.visible || input.arguments.length > 1) && + input.arguments.some(argument => argument.initializer != null) + ) { + hiddenInitializers = true; + } + } + } + let sortedAttributes = []; + const attributes = node.attributes; + if (this.showAttributes && attributes) { + sortedAttributes = attributes.filter(attribute => attribute.visible).slice(); + sortedAttributes.sort((a, b) => { + const au = a.name.toUpperCase(); + const bu = b.name.toUpperCase(); + return au < bu ? -1 : au > bu ? 1 : 0; + }); + } + if (initializers.length > 0 || hiddenInitializers || sortedAttributes.length > 0) { + const block = element.block('list'); + block.handler = () => { + this.showNodeProperties(node); + }; + for (const initializer of initializers) { + const argument = initializer.arguments[0]; + const type = argument.type; + let shape = ''; + let separator = ''; + if ( + type && + type.shape && + type.shape.dimensions && + Object.prototype.hasOwnProperty.call(type.shape.dimensions, 'length') + ) { + shape = + '\u3008' + + type.shape.dimensions.map(d => (d ? 
d : '?')).join('\u00D7') + + '\u3009'; + if ( + type.shape.dimensions.length == 0 && + argument.initializer && + !argument.initializer.state + ) { + shape = argument.initializer.toString(); + if (shape && shape.length > 10) { + shape = shape.substring(0, 10) + '\u2026'; + } + separator = ' = '; + } + } + block.add( + 'initializer-' + argument.name, + initializer.name, + shape, + type ? type.toString() : '', + separator + ); + } + if (hiddenInitializers) { + block.add(null, '\u3008' + '\u2026' + '\u3009', '', null, ''); + } + + for (const attribute of sortedAttributes) { + if (attribute.visible) { + let attributeValue = sidebar.NodeSidebar.formatAttributeValue( + attribute.value, + attribute.type + ); + if (attributeValue && attributeValue.length > 25) { + attributeValue = attributeValue.substring(0, 25) + '\u2026'; + } + block.add(null, attribute.name, attributeValue, attribute.type, ' = '); + } + } + } + + if (edges) { + const inputs = node.inputs; + for (const input of inputs) { + for (const argument of input.arguments) { + if (argument.name != '' && !argument.initializer) { + let tuple = edgeMap[argument.name]; + if (!tuple) { + tuple = {from: null, to: []}; + edgeMap[argument.name] = tuple; + } + tuple.to.push({ + node: nodeId, + name: input.name + }); + } + } + } + let outputs = node.outputs; + if (node.chain && node.chain.length > 0) { + const chainOutputs = node.chain[node.chain.length - 1].outputs; + if (chainOutputs.length > 0) { + outputs = chainOutputs; + } + } + for (const output of outputs) { + for (const argument of output.arguments) { + if (argument.name != '') { + let tuple = edgeMap[argument.name]; + if (!tuple) { + tuple = {from: null, to: []}; + edgeMap[argument.name] = tuple; + } + tuple.from = { + node: nodeId, + name: output.name, + type: argument.type + }; + } + } + } + } + + if (node.chain && node.chain.length > 0) { + for (const innerNode of node.chain) { + addNode(element, innerNode, false); + } + } + + if (node.inner) { + addNode(element, 
node.inner, false); + } + }; + + addNode(element, node, true); + + if (node.controlDependencies && node.controlDependencies.length > 0) { + for (const controlDependency of node.controlDependencies) { + let tuple = edgeMap[controlDependency]; + if (!tuple) { + tuple = {from: null, to: []}; + edgeMap[controlDependency] = tuple; + } + tuple.to.push({ + node: nodeId, + name: controlDependency, + controlDependency: true + }); + } + } + + const nodeName = node.name; + if (nodeName) { + g.setNode(nodeId, {label: element.format(graphElement), id: 'node-' + nodeName}); + } else { + g.setNode(nodeId, {label: element.format(graphElement), id: 'node-' + id.toString()}); + id++; + } + + const createCluster = function (name) { + if (!clusterMap[name]) { + g.setNode(name, {rx: 5, ry: 5}); + clusterMap[name] = true; + const parent = clusterParentMap[name]; + if (parent) { + createCluster(parent); + g.setParent(name, parent); + } + } + }; + + if (groups) { + let groupName = node.group; + if (groupName && groupName.length > 0) { + if (!Object.prototype.hasOwnProperty.call(clusterParentMap, groupName)) { + const lastIndex = groupName.lastIndexOf('/'); + if (lastIndex != -1) { + groupName = groupName.substring(0, lastIndex); + if (!Object.prototype.hasOwnProperty.call(clusterParentMap, groupName)) { + groupName = null; + } + } else { + groupName = null; + } + } + if (groupName) { + createCluster(groupName); + g.setParent(nodeId, groupName); + } + } + } + + nodeId++; + } + + for (const input of graph.inputs) { + for (const argument of input.arguments) { + let tuple = edgeMap[argument.name]; + if (!tuple) { + tuple = {from: null, to: []}; + edgeMap[argument.name] = tuple; + } + tuple.from = { + node: nodeId, + type: argument.type + }; + } + const types = input.arguments.map(argument => argument.type || '').join('\n'); + let inputName = input.name || ''; + if (inputName.length > 16) { + inputName = inputName.split('/').pop(); + } + + const inputElement = new 
grapher.NodeElement(this._host.document); + const inputHeader = inputElement.block('header'); + inputHeader.add(null, ['graph-item-input'], inputName, types, () => { + this.showModelProperties(); + }); + g.setNode(nodeId++, {label: inputElement.format(graphElement), class: 'graph-input'}); + } + + for (const output of graph.outputs) { + for (const argument of output.arguments) { + let tuple = edgeMap[argument.name]; + if (!tuple) { + tuple = {from: null, to: []}; + edgeMap[argument.name] = tuple; + } + tuple.to.push({node: nodeId}); + } + const outputTypes = output.arguments.map(argument => argument.type || '').join('\n'); + let outputName = output.name || ''; + if (outputName.length > 16) { + outputName = outputName.split('/').pop(); + } + + const outputElement = new grapher.NodeElement(this._host.document); + const outputHeader = outputElement.block('header'); + outputHeader.add(null, ['graph-item-output'], outputName, outputTypes, () => { + this.showModelProperties(); + }); + g.setNode(nodeId++, {label: outputElement.format(graphElement)}); + } + + for (const edge of Object.keys(edgeMap)) { + const tuple = edgeMap[edge]; + if (tuple.from != null) { + for (const to of tuple.to) { + let text = ''; + const type = tuple.from.type; + if (type && type.shape && type.shape.dimensions && type.shape.dimensions.length > 0) { + text = type.shape.dimensions.join('\u00D7'); + } + + if (this._showNames) { + text = edge.split('\n').shift(); // custom argument id + } + + if (to.controlDependency) { + g.setEdge(tuple.from.node, to.node, { + label: text, + id: 'edge-' + edge, + arrowhead: 'vee', + class: 'edge-path-control-dependency' + }); + } else { + g.setEdge(tuple.from.node, to.node, { + label: text, + id: 'edge-' + edge, + arrowhead: 'vee' + }); + } + } + } + } + + // Workaround for Safari background drag/zoom issue: + // https://stackoverflow.com/questions/40887193/d3-js-zoom-is-not-working-with-mousewheel-in-safari + const backgroundElement = 
this._host.document.createElementNS('http://www.w3.org/2000/svg', 'rect'); + backgroundElement.setAttribute('id', 'background'); + backgroundElement.setAttribute('width', '100%'); + backgroundElement.setAttribute('height', '100%'); + backgroundElement.setAttribute('fill', 'none'); + backgroundElement.setAttribute('pointer-events', 'all'); + graphElement.appendChild(backgroundElement); + + const originElement = this._host.document.createElementNS('http://www.w3.org/2000/svg', 'g'); + originElement.setAttribute('id', 'origin'); + graphElement.appendChild(originElement); + + let svg = null; + svg = d3.select(graphElement); + this._zoom = d3.zoom(); + this._zoom(svg); + this._zoom.scaleExtent([0.1, 2]); + this._zoom.on('zoom', () => { + originElement.setAttribute('transform', d3.event.transform.toString()); + }); + this._zoom.transform(svg, d3.zoomIdentity); + + return this._timeout(20).then(() => { + const graphRenderer = new grapher.Renderer(this._host.document, originElement); + graphRenderer.render(g); + + const inputElements = graphElement.getElementsByClassName('graph-input'); + const svgSize = graphElement.getBoundingClientRect(); + if (inputElements && inputElements.length > 0) { + // Center view based on input elements + const xs = []; + const ys = []; + for (let i = 0; i < inputElements.length; i++) { + const inputTransform = inputElements[i].transform.baseVal.consolidate().matrix; + xs.push(inputTransform.e); + ys.push(inputTransform.f); + } + let x = xs[0]; + let y = ys[0]; + if (ys.every(y => y == ys[0])) { + x = + xs.reduce((a, b) => { + return a + b; + }) / xs.length; + } + this._zoom.transform( + svg, + d3.zoomIdentity.translate(svgSize.width / 2 - x, svgSize.height / 4 - y) + ); + } else { + this._zoom.transform( + svg, + d3.zoomIdentity.translate( + (svgSize.width - g.graph().width) / 2, + (svgSize.height - g.graph().height) / 2 + ) + ); + } + return; + }); + } + } catch (error) { + return Promise.reject(error); + } + } + + applyStyleSheet(element, 
name) { + let rules = []; + for (let i = 0; i < this._host.document.styleSheets.length; i++) { + const styleSheet = this._host.document.styleSheets[i]; + if (styleSheet && styleSheet.href && styleSheet.href.endsWith('/' + name)) { + rules = styleSheet.cssRules; + break; + } + } + const nodes = element.getElementsByTagName('*'); + for (let j = 0; j < nodes.length; j++) { + const node = nodes[j]; + for (let k = 0; k < rules.length; k++) { + const rule = rules[k]; + if (node.matches(rule.selectorText)) { + for (let l = 0; l < rule.style.length; l++) { + const item = rule.style.item(l); + node.style[item] = rule.style[item]; + } + } + } + } + } + + export(file) { + const lastIndex = file.lastIndexOf('.'); + const extension = lastIndex != -1 ? file.substring(lastIndex + 1) : ''; + if (this._activeGraph && (extension == 'png' || extension == 'svg')) { + const graphElement = this._host.document.getElementById('canvas'); + const exportElement = graphElement.cloneNode(true); + this.applyStyleSheet(exportElement, 'view-grapher.css'); + exportElement.setAttribute('id', 'export'); + exportElement.removeAttribute('width'); + exportElement.removeAttribute('height'); + exportElement.style.removeProperty('opacity'); + exportElement.style.removeProperty('display'); + const backgroundElement = exportElement.querySelector('#background'); + const originElement = exportElement.querySelector('#origin'); + originElement.setAttribute('transform', 'translate(0,0) scale(1)'); + backgroundElement.removeAttribute('width'); + backgroundElement.removeAttribute('height'); + + const parentElement = graphElement.parentElement; + parentElement.insertBefore(exportElement, graphElement); + const size = exportElement.getBBox(); + parentElement.removeChild(exportElement); + parentElement.removeChild(graphElement); + parentElement.appendChild(graphElement); + + const delta = (Math.min(size.width, size.height) / 2.0) * 0.1; + const width = Math.ceil(delta + size.width + delta); + const height = 
Math.ceil(delta + size.height + delta); + originElement.setAttribute( + 'transform', + 'translate(' + delta.toString() + ', ' + delta.toString() + ') scale(1)' + ); + exportElement.setAttribute('width', width); + exportElement.setAttribute('height', height); + backgroundElement.setAttribute('width', width); + backgroundElement.setAttribute('height', height); + backgroundElement.setAttribute('fill', '#fff'); + + const data = new XMLSerializer().serializeToString(exportElement); + + if (extension === 'svg') { + const blob = new Blob([data], {type: 'image/svg'}); + this._host.export(file, blob); + } else if (extension === 'png') { + const imageElement = new Image(); + imageElement.onload = () => { + const max = Math.max(width, height); + const scale = max * 2.0 > 24000 ? 24000.0 / max : 2.0; + const canvas = this._host.document.createElement('canvas'); + canvas.width = Math.ceil(width * scale); + canvas.height = Math.ceil(height * scale); + const context = canvas.getContext('2d'); + context.scale(scale, scale); + context.drawImage(imageElement, 0, 0); + this._host.document.body.removeChild(imageElement); + canvas.toBlob(blob => { + if (blob) { + this._host.export(file, blob); + } else { + const err = new Error(); + err.name = 'Error exporting image.'; + err.message = 'Image may be too large to render as PNG.'; + this._host.exception(err, false); + this._host.error(err.name, err.message); + } + }, 'image/png'); + }; + imageElement.src = 'data:image/svg+xml;base64,' + window.btoa(unescape(encodeURIComponent(data))); + this._host.document.body.insertBefore(imageElement, this._host.document.body.firstChild); + } + } + } + + showModelProperties() { + if (this._model) { + const modelSidebar = new sidebar.ModelSidebar(this._host, this._model, this._activeGraph); + this._host.message('show-model-properties', modelSidebar.render()); + } + } + + showNodeProperties(node) { + if (node) { + const nodeSidebar = new sidebar.NodeSidebar(this._host, node); + // TODO: export + // 
nodeSidebar.on('export-tensor', (sender, tensor) => { + // this._host + // .require('./numpy') + // .then(numpy => { + // const defaultPath = tensor.name + // ? tensor.name.split('/').join('_').split(':').join('_').split('.').join('_') + // : 'tensor'; + // this._host.save('NumPy Array', 'npy', defaultPath, file => { + // try { + // const dataTypeMap = new Map([ + // ['float16', 'f2'], + // ['float32', 'f4'], + // ['float64', 'f8'], + // ['int8', 'i1'], + // ['int16', 'i2'], + // ['int32', 'i4'], + // ['int64', 'i8'], + // ['uint8', 'u1'], + // ['uint16', 'u2'], + // ['uint32', 'u4'], + // ['uint64', 'u8'], + // ['qint8', 'i1'], + // ['qint16', 'i2'], + // ['quint8', 'u1'], + // ['quint16', 'u2'] + // ]); + // let array = new numpy.Array(); + // array.shape = tensor.type.shape.dimensions; + // array.data = tensor.value; + // array.dataType = dataTypeMap.has(tensor.type.dataType) + // ? dataTypeMap.get(tensor.type.dataType) + // : tensor.type.dataType; + // const blob = new Blob([array.toBuffer()], {type: 'application/octet-stream'}); + // this._host.export(file, blob); + // } catch (error) { + // this.error('Error saving NumPy tensor.', error); + // } + // }); + // }) + // .catch(() => {}); + // }); + this._host.message('show-node-properties', {...nodeSidebar.render(), metadata: node.metadata}); + } + } + + showNodeDocumentation(node) { + const metadata = node.metadata; + if (metadata) { + const documentationSidebar = new sidebar.DocumentationSidebar(this._host, metadata); + this._host.message('show-node-documentation', documentationSidebar.render()); + } + } +}; + +class ModelError extends Error { + constructor(message, telemetry) { + super(message); + this.name = 'Error loading model.'; + this.telemetry = telemetry; + } +} + +class ModelContext { + constructor(context) { + this._context = context; + this._tags = new Map(); + this._entries = new Map(); + } + + request(file, encoding) { + return this._context.request(file, encoding); + } + + get identifier() { + 
return this._context.identifier; + } + + get buffer() { + return this._context.buffer; + } + + get text() { + if (!this._text) { + this._text = new TextDecoder('utf-8').decode(this.buffer); + } + return this._text; + } + + entries(extension) { + let entries = this._entries.get(extension); + if (!entries) { + entries = []; + try { + const buffer = this.buffer; + switch (extension) { + case 'zip': { + if (buffer && buffer.length > 2 && buffer[0] == 0x50 && buffer[1] == 0x4b) { + entries = new zip.Archive(buffer).entries; + } + break; + } + case 'tar': { + if (buffer.length >= 512) { + let sum = 0; + for (let i = 0; i < 512; i++) { + sum += i >= 148 && i < 156 ? 32 : buffer[i]; + } + let checksum = ''; + for (let i = 148; i < 156 && buffer[i] !== 0x00; i++) { + checksum += String.fromCharCode(buffer[i]); + } + checksum = parseInt(checksum, 8); + if (!isNaN(checksum) && sum == checksum) { + entries = new tar.Archive(buffer).entries; + } + } + break; + } + } + } catch (error) { + entries = []; + } + this._entries.set(extension, entries); + } + return entries; + } + + tags(extension) { + let tags = this._tags.get(extension); + if (!tags) { + tags = new Map(); + try { + switch (extension) { + case 'pbtxt': { + const b = this.buffer; + const length = b.length; + const signature = + (length >= 3 && b[0] === 0xef && b[1] === 0xbb && b[2] === 0xbf) || + (length >= 4 && b[0] === 0x00 && b[1] === 0x00 && b[2] === 0xfe && b[3] === 0xff) || + (length >= 4 && b[0] === 0xff && b[1] === 0xfe && b[2] === 0x00 && b[3] === 0x00) || + (length >= 4 && b[0] === 0x84 && b[1] === 0x31 && b[2] === 0x95 && b[3] === 0x33) || + (length >= 2 && b[0] === 0xfe && b[1] === 0xff) || + (length >= 2 && b[0] === 0xff && b[1] === 0xfe); + if ( + !signature && + b.subarray(0, Math.min(1024, length)).some(c => c < 7 || (c > 14 && c < 32)) + ) { + break; + } + const reader = prototxt.TextReader.create(this.text); + reader.start(false); + while (!reader.end(false)) { + const tag = reader.tag(); + 
tags.set(tag, true); + reader.skip(); + } + break; + } + case 'pb': { + const reader = new protobuf.Reader.create(this.buffer); + while (reader.pos < reader.len) { + const tagType = reader.uint32(); + tags.set(tagType >>> 3, tagType & 7); + try { + reader.skipType(tagType & 7); + } catch (err) { + tags = new Map(); + break; + } + } + break; + } + } + } catch (error) { + tags = new Map(); + } + this._tags.set(extension, tags); + } + return tags; + } +} + +class ArchiveContext { + constructor(entries, rootFolder, identifier, buffer) { + this._entries = {}; + if (entries) { + for (const entry of entries) { + if (entry.name.startsWith(rootFolder)) { + const name = entry.name.substring(rootFolder.length); + if (identifier.length > 0 && identifier.indexOf('/') < 0) { + this._entries[name] = entry; + } + } + } + } + this._identifier = identifier.substring(rootFolder.length); + this._buffer = buffer; + } + + request(file, encoding) { + const entry = this._entries[file]; + if (!entry) { + return Promise.reject(new Error('File not found.')); + } + const data = encoding ? 
new TextDecoder(encoding).decode(entry.data) : entry.data; + return Promise.resolve(data); + } + + get identifier() { + return this._identifier; + } + + get buffer() { + return this._buffer; + } +} + +class ArchiveError extends Error { + constructor(message) { + super(message); + this.name = 'Error loading archive.'; + } +} + +view.ModelFactoryService = class { + constructor(host) { + this._host = host; + this._extensions = []; + this.register('./onnx', ['.onnx', '.pb', '.pbtxt', '.prototxt']); + this.register('./mxnet', ['.mar', '.model', '.json', '.params']); + this.register('./keras', ['.h5', '.hd5', '.hdf5', '.keras', '.json', '.model', '.pb', '.pth']); + this.register('./coreml', ['.mlmodel']); + this.register('./caffe', ['.caffemodel', '.pbtxt', '.prototxt', '.pt']); + this.register('./caffe2', ['.pb', '.pbtxt', '.prototxt']); + this.register('./pytorch', [ + '.pt', + '.pth', + '.pt1', + '.pkl', + '.h5', + '.t7', + '.model', + '.dms', + '.tar', + '.ckpt', + '.bin', + '.pb', + '.zip' + ]); + this.register('./torch', ['.t7']); + this.register('./tflite', ['.tflite', '.lite', '.tfl', '.bin', '.pb', '.tmfile', '.h5', '.model', '.json']); + this.register('./tf', ['.pb', '.meta', '.pbtxt', '.prototxt', '.json', '.index', '.ckpt']); + this.register('./mediapipe', ['.pbtxt']); + this.register('./sklearn', ['.pkl', '.joblib', '.model', '.meta', '.pb']); + this.register('./cntk', ['.model', '.cntk', '.cmf', '.dnn']); + this.register('./paddle', ['.paddle', '__model__']); + this.register('./armnn', ['.armnn']); + this.register('./bigdl', ['.model', '.bigdl']); + this.register('./darknet', ['.cfg', '.model']); + this.register('./mnn', ['.mnn']); + this.register('./ncnn', ['.param', '.bin', '.cfg.ncnn', '.weights.ncnn']); + this.register('./tengine', ['.tmfile']); + this.register('./barracuda', ['.nn']); + this.register('./openvino', ['.xml', '.bin']); + this.register('./flux', ['.bson']); + this.register('./chainer', ['.npz', '.h5', '.hd5', '.hdf5']); + 
this.register('./dl4j', ['.zip']); + this.register('./mlnet', ['.zip']); + } + + register(id, extensions) { + for (const extension of extensions) { + this._extensions.push({extension: extension, id: id}); + } + } + + open(context) { + return this._openSignature(context).then(context => { + return this._openArchive(context).then(context => { + context = new ModelContext(context); + const identifier = context.identifier; + const extension = identifier.split('.').pop().toLowerCase(); + const modules = this._filter(context); + if (modules.length == 0) { + throw new ModelError("Unsupported file extension '." + extension + "'."); + } + const errors = []; + let match = false; + const nextModule = () => { + if (modules.length > 0) { + const id = modules.shift(); + return this._host.require(id).then(module => { + if (!module.ModelFactory) { + throw new ModelError("Failed to load module '" + id + "'."); + } + const modelFactory = new module.ModelFactory(); + if (!modelFactory.match(context)) { + return nextModule(); + } + match++; + return modelFactory + .open(context, this._host) + .then(model => { + return model; + }) + .catch(error => { + errors.push(error); + return nextModule(); + }); + }); + } else { + if (match) { + if (errors.length == 1) { + throw errors[0]; + } + throw new ModelError(errors.map(err => err.message).join('\n')); + } + const knownUnsupportedIdentifiers = new Set([ + 'natives_blob.bin', + 'v8_context_snapshot.bin', + 'snapshot_blob.bin', + 'image_net_labels.json', + 'package.json', + 'models.json', + 'LICENSE.meta', + 'input_0.pb', + 'output_0.pb' + ]); + const skip = knownUnsupportedIdentifiers.has(identifier); + const buffer = context.buffer; + const content = Array.from(buffer.subarray(0, Math.min(16, buffer.length))) + .map(c => (c < 16 ? '0' : '') + c.toString(16)) + .join(''); + throw new ModelError( + 'Unsupported file content (' + + content + + ") for extension '." 
+ + extension + + "' in '" + + identifier + + "'.", + !skip + ); + } + }; + return nextModule(); + }); + }); + } + + _openArchive(context) { + let archive = null; + let extension; + let identifier = context.identifier; + let buffer = context.buffer; + + try { + extension = identifier.split('.').pop().toLowerCase(); + if (extension == 'gz' || extension == 'tgz') { + archive = new gzip.Archive(buffer); + if (archive.entries.length == 1) { + const entry = archive.entries[0]; + if (entry.name) { + identifier = entry.name; + } else { + identifier = identifier.substring(0, identifier.lastIndexOf('.')); + if (extension == 'tgz') { + identifier += '.tar'; + } + } + buffer = entry.data; + } + } + } catch (error) { + const message = error && error.message ? error.message : error.toString(); + return Promise.reject(new ArchiveError(message.replace(/\.$/, '') + " in '" + identifier + "'.")); + } + + try { + extension = identifier.split('.').pop().toLowerCase(); + switch (extension) { + case 'tar': { + // handle .pth.tar + const torch = [0x8a, 0x0a, 0x6c, 0xfc, 0x9c, 0x46, 0xf9, 0x20, 0x6a, 0xa8, 0x50, 0x19]; + if ( + !buffer || + buffer.length < 14 || + buffer[0] != 0x80 || + !torch.every((v, i) => v == buffer[i + 2]) + ) { + archive = new tar.Archive(buffer); + } + break; + } + case 'zip': { + archive = new zip.Archive(buffer); + // PyTorch Zip archive + if ( + archive.entries.some(e => e.name.split('/').pop().split('\\').pop() === 'version') && + archive.entries.some(e => e.name.split('/').pop().split('\\').pop() === 'data.pkl') + ) { + return Promise.resolve(context); + } + // dl4j + if ( + archive.entries.some(e => e.name.split('/').pop().split('\\').pop() === 'coefficients.bin') && + archive.entries.some(e => e.name.split('/').pop().split('\\').pop() === 'configuration.json') + ) { + return Promise.resolve(context); + } + break; + } + } + } catch (error) { + const message = error && error.message ? 
error.message : error.toString(); + return Promise.reject(new ArchiveError(message.replace(/\.$/, '') + " in '" + identifier + "'.")); + } + + if (!archive) { + return Promise.resolve(context); + } + + try { + let folders = {}; + for (const entry of archive.entries) { + if (entry.name.indexOf('/') != -1) { + folders[entry.name.split('/').shift() + '/'] = true; + } else { + folders['/'] = true; + } + } + if (extension == 'tar') { + delete folders['PaxHeader/']; + } + let rootFolder = Object.keys(folders).length == 1 ? Object.keys(folders)[0] : ''; + rootFolder = rootFolder == '/' ? '' : rootFolder; + let matches = []; + let entries = archive.entries.slice(); + const nextEntry = () => { + if (entries.length > 0) { + const entry = entries.shift(); + if (entry.name.startsWith(rootFolder)) { + const identifier = entry.name.substring(rootFolder.length); + if (identifier.length > 0 && identifier.indexOf('/') < 0 && !identifier.startsWith('.')) { + const context = new ModelContext( + new ArchiveContext(null, rootFolder, entry.name, entry.data) + ); + let modules = this._filter(context); + const nextModule = () => { + if (modules.length > 0) { + const id = modules.shift(); + return this._host.require(id).then(module => { + if (!module.ModelFactory) { + throw new ArchiveError("Failed to load module '" + id + "'.", null); + } + const factory = new module.ModelFactory(); + if (factory.match(context)) { + matches.push(entry); + modules = []; + } + return nextModule(); + }); + } else { + return nextEntry(); + } + }; + return nextModule(); + } + } + return nextEntry(); + } else { + if (matches.length == 0) { + return Promise.resolve(context); + } + // MXNet + if ( + matches.length == 2 && + matches.some(e => e.name.endsWith('.params')) && + matches.some(e => e.name.endsWith('-symbol.json')) + ) { + matches = matches.filter(e => e.name.endsWith('.params')); + } + if (matches.length > 1) { + return Promise.reject(new ArchiveError('Archive contains multiple model files.')); + } + 
const match = matches[0]; + return Promise.resolve( + new ModelContext(new ArchiveContext(archive.entries, rootFolder, match.name, match.data)) + ); + } + }; + return nextEntry(); + } catch (error) { + return Promise.reject(new ArchiveError(error.message)); + } + } + + accept(identifier) { + identifier = identifier.toLowerCase(); + for (const extension of this._extensions) { + if (identifier.endsWith(extension.extension)) { + return true; + } + } + if ( + identifier.endsWith('.zip') || + identifier.endsWith('.tar') || + identifier.endsWith('.tar.gz') || + identifier.endsWith('.tgz') + ) { + return true; + } + return false; + } + + _filter(context) { + const identifier = context.identifier.toLowerCase(); + const list = this._extensions.filter(entry => identifier.endsWith(entry.extension)).map(extry => extry.id); + return Array.from(new Set(list)); + } + + _openSignature(context) { + const buffer = context.buffer; + if (context.buffer.length === 0) { + return Promise.reject(new ModelError('File has no content.', true)); + } + const list = [ + {name: 'ELF executable', value: '\x7FELF'}, + {name: 'Git LFS header', value: 'version https://git-lfs.github.com/spec/v1\n'}, + {name: 'Git LFS header', value: 'oid sha256:'}, + {name: 'HTML markup', value: ''}, + {name: 'HTML markup', value: ''}, + {name: 'HTML markup', value: ''}, + {name: 'HTML markup', value: '\n\n\n\n\n'}, + {name: 'HTML markup', value: '\n\n\n\n\n\n'}, + {name: 'Unity metadata', value: 'fileFormatVersion:'}, + {name: 'Vulkan SwiftShader ICD manifest', value: '{"file_format_version": "1.0.0", "ICD":'}, + {name: 'StringIntLabelMapProto data', value: 'item {\r\n id:'}, + {name: 'StringIntLabelMapProto data', value: 'item {\r\n name:'}, + {name: 'StringIntLabelMapProto data', value: 'item {\n id:'}, + {name: 'StringIntLabelMapProto data', value: 'item {\n name:'}, + {name: 'Python source code', value: 'import sys, types, os;'} + ]; + for (const item of list) { + if ( + buffer.length >= item.value.length && + 
buffer.subarray(0, item.value.length).every((v, i) => v === item.value.charCodeAt(i)) + ) { + return Promise.reject(new ModelError('Invalid file content. File contains ' + item.name + '.', true)); + } + } + return Promise.resolve(context); + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.View = view.View; + module.exports.ModelFactoryService = view.ModelFactoryService; +} diff --git a/frontend/packages/core/public/netron/zip.js b/frontend/packages/core/public/netron/zip.js new file mode 100644 index 00000000..9f15320a --- /dev/null +++ b/frontend/packages/core/public/netron/zip.js @@ -0,0 +1,502 @@ +/* jshint esversion: 6 */ +/* eslint "indent": [ "error", 4, { "SwitchCase": 1 } ] */ +/* global pako */ + +var zip = zip || {}; + +zip.Archive = class { + + constructor(buffer) { + this._entries = []; + if (buffer.length < 4 || buffer[0] != 0x50 || buffer[1] != 0x4B) { + throw new zip.Error('Invalid ZIP archive.'); + } + let reader = null; + for (let i = buffer.length - 4; i >= 0; i--) { + if (buffer[i] === 0x50 && buffer[i + 1] === 0x4B && buffer[i + 2] === 0x05 && buffer[i + 3] === 0x06) { + reader = new zip.Reader(buffer, i + 4, buffer.length); + break; + } + } + if (!reader) { + throw new zip.Error('End of central directory not found.'); + } + reader.skip(12); + reader.position = reader.uint32(); // central directory offset + while (reader.match([ 0x50, 0x4B, 0x01, 0x02 ])) { + this._entries.push(new zip.Entry(reader)); + } + } + + get entries() { + return this._entries; + } +}; + +zip.Entry = class { + + constructor(reader) { + reader.uint16(); // version made by + reader.skip(2); // version needed to extract + this._flags = reader.uint16(); + if ((this._flags & 1) == 1) { + throw new zip.Error('Encrypted entries not supported.'); + } + this._compressionMethod = reader.uint16(); + reader.uint32(); // date + reader.uint32(); // crc32 + this._compressedSize = reader.uint32(); + this._size = reader.uint32(); + let 
nameLength = reader.uint16(); // file name length + let extraDataLength = reader.uint16(); + const commentLength = reader.uint16(); + reader.uint16(); // disk number start + reader.uint16(); // internal file attributes + reader.uint32(); // external file attributes + const localHeaderOffset = reader.uint32(); + reader.skip(nameLength); + reader.skip(extraDataLength); + reader.bytes(commentLength); // comment + const position = reader.position; + reader.position = localHeaderOffset; + if (!reader.match([ 0x50, 0x4B, 0x03, 0x04 ])) { + throw new zip.Error('Invalid local file header signature.'); + } + reader.skip(22); + nameLength = reader.uint16(); + extraDataLength = reader.uint16(); + const nameBuffer = reader.bytes(nameLength); + this._name = ''; + for (const c of nameBuffer) { + this._name += String.fromCharCode(c); + } + reader.skip(extraDataLength); + this._compressedData = reader.bytes(this._compressedSize); + reader.position = position; + } + + get name() { + return this._name; + } + + get data() { + if (!this._data) { + + switch (this._compressionMethod) { + case 0: // Stored + if (this._size != this._compressedSize) { + throw new zip.Error('Invalid compression size.'); + } + this._data = new Uint8Array(this._compressedData.length); + this._data.set(this._compressedData); + break; + case 8: // Deflate + this._data = new zip.Inflater().inflateRaw(this._compressedData); + if (this._size != this._data.length) { + throw new zip.Error('Invalid uncompressed size.'); + } + break; + default: + throw new zip.Error('Invalid compression method.'); + } + + delete this._size; + delete this._compressedData; + } + return this._data; + } + +}; + +zip.HuffmanTree = class { + + constructor() { + this.table = new Uint16Array(16); + this.symbol = new Uint16Array(288); + zip.HuffmanTree._offsets = zip.HuffmanTree._offsets || new Uint16Array(16); + } + + build(lengths, offset, count) { + for (let i = 0; i < 16; ++i) { + this.table[i] = 0; + } + for (let i = 0; i < count; ++i) { 
+ this.table[lengths[offset + i]]++; + } + this.table[0] = 0; + let sum = 0; + for (let i = 0; i < 16; i++) { + zip.HuffmanTree._offsets[i] = sum; + sum += this.table[i]; + } + for (let i = 0; i < count; i++) { + if (lengths[offset + i]) { + this.symbol[zip.HuffmanTree._offsets[lengths[offset + i]]++] = i; + } + } + } + + static initialize() { + if (!zip.HuffmanTree.staticLiteralLengthTree) { + zip.HuffmanTree.staticLiteralLengthTree = new zip.HuffmanTree(); + zip.HuffmanTree.staticLiteralLengthTree.table = new Uint8Array([ 0, 0, 0, 0, 0, 0, 0, 24, 152, 112, 0, 0, 0, 0, 0, 0 ]); + for (let i = 0; i < 24; ++i) { + zip.HuffmanTree.staticLiteralLengthTree.symbol[i] = 256 + i; + } + for (let i = 0; i < 144; ++i) { + zip.HuffmanTree.staticLiteralLengthTree.symbol[24 + i] = i; + } + for (let i = 0; i < 8; ++i) { + zip.HuffmanTree.staticLiteralLengthTree.symbol[24 + 144 + i] = 280 + i; + } + for (let i = 0; i < 112; ++i) { + zip.HuffmanTree.staticLiteralLengthTree.symbol[24 + 144 + 8 + i] = 144 + i; + } + zip.HuffmanTree.staticDistanceTree = new zip.HuffmanTree(); + zip.HuffmanTree.staticDistanceTree.table = new Uint8Array([ 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]); + zip.HuffmanTree.staticDistanceTree.symbol = new Uint8Array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 ]); + } + } +}; + +zip.Inflater = class { + + inflateRaw(data) { + + if (typeof process === 'object' && typeof process.versions == 'object' && typeof process.versions.node !== 'undefined') { + return require('zlib').inflateRawSync(data); + } + if (typeof pako !== 'undefined') { + return pako.inflateRaw(data); + } + + zip.Inflater.initilize(); + zip.HuffmanTree.initialize(); + + let reader = new zip.BitReader(data); + let output = new zip.Ouptut(); + + const literalLengthTree = new zip.HuffmanTree(); + const distanceTree = new zip.HuffmanTree(); + + let type; + do { + type = reader.bits(3); + switch (type >>> 1) { + case 0: 
// uncompressed block + this._inflateUncompressedBlock(reader, output); + break; + case 1: // block with fixed huffman trees + this._inflateBlockData(reader, output, zip.HuffmanTree.staticLiteralLengthTree, zip.HuffmanTree.staticDistanceTree); + break; + case 2: // block with dynamic huffman trees + this._decodeTrees(reader, literalLengthTree, distanceTree); + this._inflateBlockData(reader, output, literalLengthTree, distanceTree); + break; + default: + throw new zip.Error('Unknown block type.'); + } + } while ((type & 1) == 0); + + return output.merge(); + } + + _inflateUncompressedBlock(reader, output) { + while (reader.data > 8) { + reader.position--; + reader.data -= 8; + } + reader.data = 0; + const length = reader.uint16(); + const inverseLength = reader.uint16(); + if (length !== (~inverseLength & 0x0000ffff)) { + throw new zip.Error('Invalid uncompressed block length.'); + } + + const block = reader.bytes(length); + output.push(block); + + if (length > 32768) { + output.buffer.set(block.subarray(block.length - 32768, block.length), 0); + output.position = 32768; + } + else { + output.reset(); + output.buffer.set(block, output.position); + output.position += block.length; + } + } + + _decodeTrees(reader, lengthTree, distanceTree) { + + const hlit = reader.bits(5) + 257; + const hdist = reader.bits(5) + 1; + const lengthCount = reader.bits(4) + 4; + for (let i = 0; i < 19; i++) { + zip.Inflater._lengths[i] = 0; + } + for (let j = 0; j < lengthCount; j++) { + zip.Inflater._lengths[zip.Inflater._codeOrder[j]] = reader.bits(3); + } + zip.Inflater._codeTree.build(zip.Inflater._lengths, 0, 19); + let length; + for (let position = 0; position < hlit + hdist;) { + const symbol = reader.symbol(zip.Inflater._codeTree); + switch (symbol) { + case 16: { + const prev = zip.Inflater._lengths[position - 1]; + for (length = reader.bits(2) + 3; length; length--) { + zip.Inflater._lengths[position++] = prev; + } + break; + } + case 17: { + for (length = reader.bits(3) + 3; 
length; length--) { + zip.Inflater._lengths[position++] = 0; + } + break; + } + case 18: { + for (length = reader.bits(7) + 11; length; length--) { + zip.Inflater._lengths[position++] = 0; + } + break; + } + default: { + zip.Inflater._lengths[position++] = symbol; + break; + } + } + } + lengthTree.build(zip.Inflater._lengths, 0, hlit); + distanceTree.build(zip.Inflater._lengths, hlit, hdist); + } + + _inflateBlockData(reader, output, lengthTree, distanceTree) { + const buffer = output.buffer; + let position = output.position; + let start = position; + for (;;) { + if (position > 62464) { + output.position = position; + output.push(new Uint8Array(buffer.subarray(start, position))); + position = output.reset(); + start = position; + } + let symbol = reader.symbol(lengthTree); + if (symbol === 256) { + output.position = position; + output.push(new Uint8Array(buffer.subarray(start, output.position))); + output.reset(); + return; + } + if (symbol < 256) { + buffer[position++] = symbol; + } + else { + symbol -= 257; + const length = reader.bitsBase(zip.Inflater._lengthBits[symbol], zip.Inflater._lengthBase[symbol]); + const distance = reader.symbol(distanceTree); + let offset = position - reader.bitsBase(zip.Inflater._distanceBits[distance], zip.Inflater._distanceBase[distance]); + for (let i = 0; i < length; i++) { + buffer[position++] = buffer[offset++]; + } + } + } + } + + static initilize() { + if (zip.HuffmanTree.staticLiteralLengthTree) { + return; + } + zip.Inflater._codeOrder = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]; + zip.Inflater._codeTree = new zip.HuffmanTree(); + zip.Inflater._lengths = new Uint8Array(288 + 32); + zip.Inflater._lengthBits = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 6 ]; + zip.Inflater._lengthBase = [ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 323 ]; + zip.Inflater._distanceBits = [ 0, 0, 0, 0, 
1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 ]; + zip.Inflater._distanceBase = [ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577 ]; + } + +}; + +zip.Ouptut = class { + + constructor() { + this._blocks = []; + this.buffer = new Uint8Array(65536); + this.position = 0; + } + + reset() { + if (this.position > 32768) { + this.buffer.set(this.buffer.subarray(this.position - 32768, this.position), 0); + this.position = 32768; + } + return this.position; + } + + push(block) { + this._blocks.push(block); + } + + merge() { + let size = 0; + for (const block1 of this._blocks) { + size += block1.length; + } + let output = new Uint8Array(size); + let offset = 0; + for (const block2 of this._blocks) { + output.set(block2, offset); + offset += block2.length; + } + return output; + } + +}; + +zip.BitReader = class { + + constructor(buffer) { + this.buffer = buffer; + this.position = 0; + this.data = 0; + this.value = 0; + } + + bits(count) { + while (this.data < 24) { + this.value |= this.buffer[this.position++] << this.data; + this.data += 8; + } + const value = this.value & (0xffff >>> (16 - count)); + this.value >>>= count; + this.data -= count; + return value; + } + + bitsBase(count, base) { + if (count == 0) { + return base; + } + while (this.data < 24) { + this.value |= this.buffer[this.position++] << this.data; + this.data += 8; + } + const value = this.value & (0xffff >>> (16 - count)); + this.value >>>= count; + this.data -= count; + return value + base; + } + + bytes(size) { + const value = this.buffer.subarray(this.position, this.position + size); + this.position += size; + return value; + } + + uint16() { + const value = this.buffer[this.position] | (this.buffer[this.position + 1] << 8); + this.position += 2; + return value; + } + + symbol(tree) { + while (this.data < 24) { + this.value |= this.buffer[this.position++] << this.data; + 
this.data += 8; + } + let sum = 0; + let current = 0; + let length = 0; + let value = this.value; + const table = tree.table; + do { + current = (current << 1) + (value & 1); + value >>>= 1; + length++; + sum += table[length]; + current -= table[length]; + } while (current >= 0); + this.value = value; + this.data -= length; + return tree.symbol[sum + current]; + } + +}; + +zip.Reader = class { + + constructor(buffer, start, end) { + this._buffer = buffer; + this._position = start; + this._end = end; + } + + match(signature) { + if (this._position + signature.length <= this._end) { + for (let i = 0; i < signature.length; i++) { + if (this._buffer[this._position + i] != signature[i]) { + return false; + } + } + } + this._position += signature.length; + return true; + } + + get position() { + return this._position; + } + + set position(value) { + this._position = value >= 0 ? value : this._end + value; + } + + peek() { + return this._position < this._end; + } + + skip(size) { + if (this._position + size > this._end) { + throw new zip.Error('Data not available.'); + } + this._position += size; + } + + bytes(size) { + if (this._position + size > this._end) { + throw new zip.Error('Data not available.'); + } + size = size === undefined ? 
this._end : size; + const data = this._buffer.subarray(this._position, this._position + size); + this._position += size; + return data; + } + + uint16() { + if (this._position + 2 > this._end) { + throw new zip.Error('Data not available.'); + } + const value = this._buffer[this._position] | (this._buffer[this._position + 1] << 8); + this._position += 2; + return value; + } + + uint32() { + return this.uint16() | (this.uint16() << 16); + } +}; + +zip.Error = class extends Error { + constructor(message) { + super(message); + this.name = 'ZIP Error'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.Archive = zip.Archive; + module.exports.Inflater = zip.Inflater; +} \ No newline at end of file diff --git a/frontend/packages/core/public/style/fonts/vdl-icon.svg b/frontend/packages/core/public/style/fonts/vdl-icon.svg index e67d2df7..21ecdbdc 100755 --- a/frontend/packages/core/public/style/fonts/vdl-icon.svg +++ b/frontend/packages/core/public/style/fonts/vdl-icon.svg @@ -1 +1,34 @@ - \ No newline at end of file + + + +Generated by IcoMoon + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/frontend/packages/core/public/style/fonts/vdl-icon.ttf b/frontend/packages/core/public/style/fonts/vdl-icon.ttf index f7548e4f58885f53a6222c4ecfd779b3d8c36792..a999f966c7fbdb30009fc6ed6cd149713842b360 100755 GIT binary patch delta 2067 zcmb_cU2GIp6uxJ6XLpA(vpX}pJF}f_?at6$nBBH>cXzsM+q7LpgVNH5-=cu5v``6! 
z@)N+2xFv!Q@*uQ6`coPbhV4@G+cJ8_7 z+;hHj?%D6&xy+(lBR~jgA#=n>oUzS4`LGgr0xciow`cE6bynN<&D(@P67?c{!uYUd;A=;<7-#d*4VUXX6`d!plP0t)ZHJv7} z5u$gXK6~)+-b(eU?}pz;!v!?-%~Vg#l1}m^>NimDI8>dPx_WvO2J5#l$^7i$W5=;j z9IbO~pZNUT#pNN;=3xV9S4QAmcoS~_vFy9fWosfl|y6H}8DJ6=g=dTf{rgtSPoCh~8ptJWaQ$S8w3!XS?zWX`rnO_TjR zK4Rd(f5L;WBID#JAz-oN;^AqG&8!6lHsjH#>mBevH$!;18&{W9QBs%K36@kzR2SK5 z{(tDolB6gSf{W@U26^e9I#k0}KhmOq?~tES!%wh7Dyfq9$QOikQYY&5q(tv^t7LV9 zI{+?4mf%0G7!$g|a?CQAfgqJ?V3}xginQ2ULbpO6lwmEiE79E!s8-y(68x+)Nyde<={i8J6hz?Sq_nBJrsOr&}HhBzR zwJLG~T4klR%P*>ar`=IN62ohB$PEER!%~!M)AdRx!0L6au>_ZLlPNALL4UhtZOmr7 z07q9gyV0`R+ciBU2$pqneEc~yjomkS+^yBG9oV(|{AwKMckepz_w_P>lF${Ol1L~o zGIS0)huR%m#)X;^!^%)jQdNoTtmt~%W3BQ26vuPs<8Cr5OUfeFhk02Ny$!1xYgF~@ z`1ncF@-lm+*05HtS*ow}Xc1mP>}i8}KY`mMOo*Dw$79iel`_+AvA00GeV0|kQ19+! zGu$>*CRGD35{ql{#6$(dh2M@7p<{uk~)DNdf|8-XQv5U)xD zst0(yFYKI5vDtM8xIGa~i@eZd*X$t$$B=FJB=oK2a-~u(Z`BjXB*(S~+18+KI~?zW6D;Bc zH{iOP4I_x$REBf8g0FrtAcjH`Kf?zCJTI|jR*T45mKQ^Kad|WbxR;;o@86ct^~AQm z{&n2SWO58^6%>K%tL5|0#9~?`qA4oN6oAg_WRn-$9ZVzH-bzRU$>p_Hd% z$z%(>#Zn&%U~Un!@qDgcW3|GW9NR#9Pzp3T!U*~O8`{wG;elUC3%+7VXY%cx$|r1} z$%S7cB}gvhmqlzFWk#|srZX@w$^iKx>50V! 
zKw1FE2hkkqIhARV7kE8@{0asJ{)mj!#1tukNg+Tb7l86+89)I}JJwJJ20;!WUnL{A zq{6#^Asfio0rEp~@{=7C<(DZR0SYt#1$1%~D+(Co7`6lXYk+)(yu{qpL)Aflfc!H+ zOC}ZM7ncAX3IxIlAbAF6<{uLe*fR=DV)SMd*zCjD%C8La4-n5jxVt=_-{va=Hw#dJ mf#KF}wbw9u@(XcuM$yR%5+Rc(Na%2YgFuvF>*gyGXBYvk&OaXj diff --git a/frontend/packages/core/public/style/fonts/vdl-icon.woff b/frontend/packages/core/public/style/fonts/vdl-icon.woff index 3b3941c5fe074785c4750e93cccead434b53eac4..67326818a45733d30eb8ccd95ebe0230278f1dd2 100755 GIT binary patch delta 2160 zcmb_cO>7%Q6rQ*C+TJE>dw0FwwbwPZx3OE>Np@^+TqjZDK&cANpXgr-Z6P#iLzT4Y zkJ_S2EEO#WdVn?@IPjB3l@K?61Sf=|3IQzciNF5sUsGsA+g9AKXCn0R zd#7ejo`lY4c$z(+^l^(GkyjgVmj{EHKdJve`I`S5^b9> z_+=*7&OUQ@_UMU|_&-aleje03UT&@x`azo;gl(XG_6}SKIe75TqUVNZk%UPv*^dCJ zv`>vhOEgvNvdbBhDqWR6sGy$hfNaK0`-9*|4=o8=v{dY>_))_ZeX}%+hi1CrXqOw; zO2vkw8fjefQ|`n1Sy53$xWMMOoU~zNTGnJT=g0O;OzasupTWWO;WvA@X*f{`n(U$@#A`U0YDZpt8XJY(WW%$~@C9{}0t#2+DF0!A0c?LOSA=M=jmMmGd$# zc?nidA>-sD@*N><)QY&Zs?e1xR?H5td%>niD+dKt#+?o@t#lRAK0jq7LW@xW6s7kxh_1-RtOg2lat;QfgPaTAM{~=YOs+p@|U-@{^vuzf07n# zV>o`kGw{{QI#-h_keE9o?MQfT5P}W{bGlD%5%1iRRL}$Ez>mT=!f$J$Yr0!9=!MuM KL9Tx)FTuYJ;(+h~ delta 322 zcmaE3dPZHW+~3WOfsp|SgwHT=gXsna#>p3@#3t$p*Bd40CKfO-FlGQHLO@tN?9%k~ z#A1-x9w46siUrbhD${^sM;I9RBS1Lv0H1yBr#A$E8dFl6WCCj-^_37!E8>VUBPGUX$=i4{PL1%Ckf3Scb9usts^ zH`1^N-0qjP{HIlYJPy83i`aU~J`A26-Ha zXCK^M9?x&{m4TZDD8RsQYq#2K7(LlU!kkfba*jmE = T & {uid: NodeUID}; -type WithUIDMap = T extends (infer U)[] ? 
WithUID[] : WithUID; - -type NodeFinder = (type: NodeType, uid: NodeUID) => undefined | Node; -const OP_NODE_PREFIX = 'opNode_'; - -/* misc */ -const genNodeUID = (type: NodeType, id: number | string) => { - switch (type) { - case NodeType.Op: - return `${OP_NODE_PREFIX}${id}`; - case NodeType.Input: - case NodeType.Output: - return `${id}`; - } -}; -const assignNodeUID = (type: NodeType, node: T[]): WithUID[] => { - const process = (node: T, i: number) => { - const uid = genNodeUID(type, i); - - return {...node, uid}; - }; - return node.map(process); -}; - -const createNodeFinder = (graph?: Graph) => { - if (!graph) { - return () => undefined; - } - - const reverseInputIdx = graph.input.reduce<{[k: string]: number}>((memo, input, i) => { - memo[genNodeUID(NodeType.Input, input.name)] = i; - return memo; - }, {}); - const reverseOutputIdx = graph.output.reduce<{[k: string]: number}>((memo, input, i) => { - memo[genNodeUID(NodeType.Output, input.name)] = i; - return memo; - }, {}); - - return (type: NodeType, nodeUID: NodeUID) => { - switch (type) { - case NodeType.Input: { - const idx = reverseInputIdx[nodeUID]; - return idx == undefined ? undefined : graph.input[idx]; - } - case NodeType.Output: { - const idx = reverseOutputIdx[nodeUID]; - return idx == undefined ? 
undefined : graph.output[idx]; - } - case NodeType.Op: { - const idx = +nodeUID.replace(OP_NODE_PREFIX, ''); - return graph.node[idx]; - } - } - }; -}; - -const relationPush = ( - nodeRelationMapping: NodeRelationMapping, - nodeUID: NodeUID, - key: keyof NodeRelation, - value: NodeUID -) => { - const leaf = nodeRelationMapping[nodeUID] || {input: [], output: []}; - leaf[key].push(value); - nodeRelationMapping[nodeUID] = leaf; -}; - -const traverseRelation = ( - nodeMapping: NodeRelationMapping, - process: (bridge: NodeUID, inputTo: NodeUID, outputTo: NodeUID) => void -) => { - for (const [nodeUID, relations] of Object.entries(nodeMapping)) { - const {input, output} = relations; - - input.forEach(inputTo => { - output.forEach(outputTo => { - process(nodeUID, inputTo, outputTo); - }); - }); - } -}; - -const buildNodeRelationMapping = (nodeList: WithUIDMap) => { - return nodeList.reduce((memo, node) => { - const uid = node.uid; - // reverse - (node.output || []).forEach(v => relationPush(memo, v, 'input', uid)); - (node.input || []).forEach(v => relationPush(memo, v, 'output', uid)); - - return memo; - }, {}); -}; - -const expandRelations = (nodeMapping: NodeRelationMapping) => { - const briefLayer: {nodes: DagNode[]; edges: DagEdge[]} = {nodes: [], edges: []}; - // a tmp node the middle man between input & output - const detailLayer: {nodes: DagNode[]; edges: DagEdge[]} = {nodes: [], edges: []}; - - traverseRelation(nodeMapping, (bridge, inputTo, outputTo) => { - detailLayer.nodes.push({ - key: bridge, - label: bridge, - shape: 'diamond', - class: 'output', - type: NodeType.Output - }); - - detailLayer.edges.push([inputTo, bridge]); - detailLayer.edges.push([bridge, outputTo]); - briefLayer.edges.push([inputTo, outputTo]); - }); - - return { - briefLayer, - detailLayer - }; -}; - -const extractInputLayer = (nodeRelationMapping: NodeRelationMapping, findNode: NodeFinder) => { - const nodes: DagNode[] = []; - const edges: DagEdge[] = []; - for (const [nodeUID, 
relations] of Object.entries(nodeRelationMapping)) { - if (relations.input.length !== 0) { - continue; - } - const sepIdx = nodeUID.indexOf('@'); - const inputNodeUID = sepIdx > 0 ? nodeUID.slice(0, sepIdx) : nodeUID; - const inputNode = findNode(NodeType.Input, inputNodeUID) as InputNode; - nodes.push({ - key: inputNodeUID, - type: NodeType.Input, - label: ` -id: ${inputNode.name} -type: ${inputNode.data_type} -dims: ${inputNode.shape.join(' × ')} -`, - shape: 'rect', - class: 'input' - }); - - relations.output.forEach(o => edges.push([inputNodeUID, o])); - } - - return {nodes, edges}; -}; - -const extractOutputLayer = (nodeRelationMapping: NodeRelationMapping) => { - const nodes: DagNode[] = []; - const edges: DagEdge[] = []; - for (const [nodeUID, relations] of Object.entries(nodeRelationMapping)) { - if (relations.output.length !== 0) { - continue; - } - nodes.push({ - key: nodeUID, - type: NodeType.Output, - label: nodeUID, - shape: 'diamond', - class: 'output' - }); - - for (const inputNode of relations.input) { - edges.push([nodeUID, inputNode]); - } - } - - return { - nodes, - edges - }; -}; - -export const collectDagFacts = (graph?: Graph) => { - const findNode = createNodeFinder(graph); - const nodeList = assignNodeUID(NodeType.Op, graph ? 
graph.node : []); - const nodeRelationMapping = buildNodeRelationMapping(nodeList); - - const inputLayer = extractInputLayer(nodeRelationMapping, findNode); - const outputLayer = extractOutputLayer(nodeRelationMapping); - - const backboneNodes = nodeList.map(n => ({ - key: n.uid, - type: NodeType.Op, - label: n.opType, - shape: 'rect', - class: 'operator' - })); - - const {briefLayer: bl, detailLayer: dl} = expandRelations(nodeRelationMapping); - const briefLayer = {nodes: backboneNodes, edges: bl.edges}; - const detailLayer = {nodes: briefLayer.nodes.concat(dl.nodes), edges: dl.edges}; - - return { - briefLayer, - detailLayer, - inputLayer, - outputLayer, - findNode - }; -}; diff --git a/frontend/packages/core/resource/graphs/index.ts b/frontend/packages/core/resource/graphs/index.ts deleted file mode 100644 index e00e03e8..00000000 --- a/frontend/packages/core/resource/graphs/index.ts +++ /dev/null @@ -1,2 +0,0 @@ -export * from './types'; -export * from './collectDagFacts'; diff --git a/frontend/packages/core/resource/graphs/types.ts b/frontend/packages/core/resource/graphs/types.ts index 4881dc12..2341adc2 100644 --- a/frontend/packages/core/resource/graphs/types.ts +++ b/frontend/packages/core/resource/graphs/types.ts @@ -1,44 +1,76 @@ -export type NodeUID = string; -export enum NodeType { - Input, - Output, - Op -} - -export type InputNode = { - data_type: string; +export type Property = { + name?: string; + value: string; + type?: 'raw' | 'code'; + documentation?: boolean; +}; + +export type Argument = { + name?: string; + value: string; + children?: Property[]; +}; + +export type NameValues = { name: string; - shape: string[]; + values: T[]; }; -export type OutputNode = { - data_type: string; +export type NameValueGroup = { name: string; - shape: string[]; + properties: NameValues[]; +}; + +export type Properties = { + properties?: NameValues[]; + groups?: NameValueGroup[]; + metadata?: unknown; }; -export type OpNode = { - input: NodeUID[]; - output: 
NodeUID[]; - opType: string; +export type SearchItem = { + type: 'input' | 'output' | 'node' | 'initializer'; + name: string; + id: string; }; -export type Node = InputNode | OutputNode | OpNode; -export type TypedNode = - | (InputNode & {type: NodeType.Input}) - | (OutputNode & {type: NodeType.Output}) - | (OpNode & {type: NodeType.Op}); +export type SearchResult = { + text: string; + result: SearchItem[]; +}; -export type Edge = { - source: string; - target: string; - label: string; +type IO = { + name: string; + type: string; + option: string; + description: string; }; -export interface Graph { - input: InputNode[]; - output: OutputNode[]; +export type Documentation = { name: string; - node: OpNode[]; - edges: Edge[]; -} + summary?: string; + description?: string; + attributes?: { + name: string; + type: string; + description: string; + }[]; + inputs?: IO[]; + inputs_range?: string; + outputs?: IO[]; + outputs_range?: string; + type_constraints?: { + type_param_str: string; + allowed_type_strs: string[]; + description: string; + }[]; + examples?: { + code: string; + summary: string; + }[]; + references?: { + description: string; + }[]; + domain?: string; + since_version?: string; + support_level?: string; +}; diff --git a/frontend/packages/core/utils/style.ts b/frontend/packages/core/utils/style.ts index 30d12250..efdffb42 100644 --- a/frontend/packages/core/utils/style.ts +++ b/frontend/packages/core/utils/style.ts @@ -19,6 +19,8 @@ export const rem = (pxval: string | number): string => polished.rem(pxval, fontS export const em = (pxval: string | number, base?: string | number): string => polished.em(pxval, base || fontSize); export const half = (value: string | number): string => math(`(${value}) / 2`); export const headerHeight = rem(60); +export const contentMargin = rem(20); +export const contentHeight = `calc(100vh - ${math(`${contentMargin} * 2 + ${headerHeight}`)})`; export const asideWidth = rem(260); export const borderRadius = '4px'; export const 
progressSpinnerSize = '20px'; diff --git a/frontend/packages/i18n/package.json b/frontend/packages/i18n/package.json index d72385ec..ae1d1457 100644 --- a/frontend/packages/i18n/package.json +++ b/frontend/packages/i18n/package.json @@ -37,23 +37,23 @@ "dependencies": { "detect-node": "2.0.4", "hoist-non-react-statics": "3.3.2", - "i18next": "19.4.4", - "i18next-browser-languagedetector": "4.1.1", - "i18next-fs-backend": "1.0.2", - "i18next-http-backend": "1.0.8", - "i18next-http-middleware": "1.0.4", + "i18next": "19.4.5", + "i18next-browser-languagedetector": "4.2.0", + "i18next-fs-backend": "1.0.4", + "i18next-http-backend": "1.0.15", + "i18next-http-middleware": "2.1.0", "path-match": "1.2.4", "prop-types": "15.7.2", - "react-i18next": "11.4.0", + "react-i18next": "11.5.0", "url": "0.11.0" }, "devDependencies": { "@types/express": "4.17.6", "@types/hoist-non-react-statics": "3.3.1", - "@types/node": "13.13.5", - "@types/react": "16.9.34", - "@types/react-dom": "16.9.7", - "typescript": "3.8.3" + "@types/node": "14.0.10", + "@types/react": "16.9.35", + "@types/react-dom": "16.9.8", + "typescript": "3.9.3" }, "peerDependencies": { "express": "^4.17.1", diff --git a/frontend/packages/i18n/src/hocs/app-with-translation.tsx b/frontend/packages/i18n/src/hocs/app-with-translation.tsx index 017fc929..b15d7352 100644 --- a/frontend/packages/i18n/src/hocs/app-with-translation.tsx +++ b/frontend/packages/i18n/src/hocs/app-with-translation.tsx @@ -38,6 +38,7 @@ type I18nRes = { }; }; +// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types export const appWithTranslation = function (this: NextI18Next, WrappedComponent: any) { const WrappedComponentWithSSR = withSSR()(WrappedComponent); const {config, i18n} = this; diff --git a/frontend/packages/i18n/src/hocs/with-internals.tsx b/frontend/packages/i18n/src/hocs/with-internals.tsx index ed2ffd3b..d8208601 100644 --- a/frontend/packages/i18n/src/hocs/with-internals.tsx +++ 
b/frontend/packages/i18n/src/hocs/with-internals.tsx @@ -3,6 +3,7 @@ import {NextI18NextInternals} from '../../types'; import React from 'react'; +// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types export const withInternals = (WrappedComponent: any, config: NextI18NextInternals) => { class WithInternals extends React.Component { static displayName = `withNextI18NextInternals(${ diff --git a/frontend/packages/mock/data/components.ts b/frontend/packages/mock/data/components.ts index ca5eb431..d28f03f1 100644 --- a/frontend/packages/mock/data/components.ts +++ b/frontend/packages/mock/data/components.ts @@ -1 +1 @@ -export default ['embeddings', 'scalar', 'image']; +export default ['embeddings', 'scalar', 'image', 'graph']; diff --git a/frontend/packages/mock/package.json b/frontend/packages/mock/package.json index 2b8d83aa..c7176e1a 100644 --- a/frontend/packages/mock/package.json +++ b/frontend/packages/mock/package.json @@ -37,9 +37,9 @@ }, "devDependencies": { "@types/express": "4.17.6", - "@types/faker": "4.1.11", - "@types/node": "13.13.5", - "typescript": "3.8.3" + "@types/faker": "4.1.12", + "@types/node": "14.0.10", + "typescript": "3.9.3" }, "peerDependencies": { "express": "^4.17.1" diff --git a/frontend/packages/server/ecosystem.config.js b/frontend/packages/server/ecosystem.config.js index 929915bb..3ba1dbb9 100644 --- a/frontend/packages/server/ecosystem.config.js +++ b/frontend/packages/server/ecosystem.config.js @@ -11,9 +11,9 @@ module.exports = { instances: 'max', autorestart: true, watch: false, - exec_mode: 'cluster', // eslint-disable-line @typescript-eslint/camelcase - max_memory_restart: '2G', // eslint-disable-line @typescript-eslint/camelcase - wait_ready: true, // eslint-disable-line @typescript-eslint/camelcase + exec_mode: 'cluster', // eslint-disable-line @typescript-eslint/naming-convention + max_memory_restart: '2G', // eslint-disable-line @typescript-eslint/naming-convention + wait_ready: true, // 
eslint-disable-line @typescript-eslint/naming-convention env: { ...process.env, NODE_ENV: 'production' diff --git a/frontend/packages/server/package.json b/frontend/packages/server/package.json index 002440e6..28c7fe74 100644 --- a/frontend/packages/server/package.json +++ b/frontend/packages/server/package.json @@ -40,23 +40,23 @@ "@visualdl/core": "2.0.0-beta.43", "@visualdl/i18n": "2.0.0-beta.43", "express": "4.17.1", - "http-proxy-middleware": "1.0.3", - "next": "9.3.6", + "http-proxy-middleware": "1.0.4", + "next": "9.4.4", "pm2": "4.4.0" }, "devDependencies": { "@types/express": "4.17.6", - "@types/node": "13.13.5", - "@types/shelljs": "0.8.7", - "@types/webpack": "4.41.12", - "@types/webpack-dev-middleware": "3.7.0", + "@types/node": "14.0.10", + "@types/shelljs": "0.8.8", + "@types/webpack": "4.41.17", + "@types/webpack-dev-middleware": "3.7.1", "@visualdl/mock": "2.0.0-beta.43", "cross-env": "7.0.2", - "nodemon": "2.0.3", + "nodemon": "2.0.4", "shelljs": "0.8.4", - "ts-loader": "7.0.3", - "ts-node": "8.10.1", - "typescript": "3.8.3", + "ts-loader": "7.0.5", + "ts-node": "8.10.2", + "typescript": "3.9.3", "webpack": "4.43.0", "webpack-cli": "3.3.11", "webpack-dev-middleware": "3.7.2" diff --git a/frontend/packages/serverless/package.json b/frontend/packages/serverless/package.json index 37c35302..22825695 100644 --- a/frontend/packages/serverless/package.json +++ b/frontend/packages/serverless/package.json @@ -31,13 +31,13 @@ "test": "echo \"Error: no test specified\" && exit 0" }, "devDependencies": { - "@types/node": "13.13.5", + "@types/node": "14.0.10", "@types/rimraf": "3.0.0", "@visualdl/core": "2.0.0-beta.43", "cross-env": "7.0.2", "rimraf": "3.0.2", - "ts-node": "8.10.1", - "typescript": "3.8.3" + "ts-node": "8.10.2", + "typescript": "3.9.3" }, "engines": { "node": ">=10", diff --git a/frontend/yarn.lock b/frontend/yarn.lock index da53ec90..01b77c7b 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -2,99 +2,127 @@ # yarn lockfile v1 
-"@ampproject/toolbox-core@^2.2.0", "@ampproject/toolbox-core@^2.3.0": - version "2.3.0" - resolved "https://registry.yarnpkg.com/@ampproject/toolbox-core/-/toolbox-core-2.3.0.tgz#f27bd17e01fdc6725c440aefa844f63466c0f37e" - integrity sha512-NT+kVR5Rm2cxp12h40IXgPRWmq0cpUdmcgZmgdelplp/q//4aWkt2+llGHR2foQJkwICxMVVlb/XidsHz0Rh9g== +"@ampproject/toolbox-core@^2.4.0-alpha.1", "@ampproject/toolbox-core@^2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@ampproject/toolbox-core/-/toolbox-core-2.5.0.tgz#8ea4575f1c0d0048e73f64bf7e5cfc8a8e5bd6ef" + integrity sha512-aQjE8wORKXJ2tLWHuevdSL31zhQdUhC+skyEhESBV/8eOzd7ROaOzR/F43bS7uAhnYta1G0Zd/HqofgN7LRSfw== dependencies: cross-fetch "3.0.4" + lru-cache "5.1.1" -"@ampproject/toolbox-optimizer@2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@ampproject/toolbox-optimizer/-/toolbox-optimizer-2.2.0.tgz#2438d7102beb1a74bae8d20926e038c5f724a8ae" - integrity sha512-lEujArv6jyl/mEab0uBZ25oMkf+kf8cpTuHPcy8k3+jtomNyVtd94lbSWbQtomsEnYQ0MA9MvLvCJXsJz1fQcg== +"@ampproject/toolbox-optimizer@2.4.0": + version "2.4.0" + resolved "https://registry.yarnpkg.com/@ampproject/toolbox-optimizer/-/toolbox-optimizer-2.4.0.tgz#16bde73913f8b58a9bf617d37cdc1f21a1222f38" + integrity sha512-Bmb+eMF9/VB3H0qPdZy0V5yPSkWe5RwuGbXiMxzqYdJgmMat+NL75EtozQnlpa0uBlESnOGe7bMojm/SA1ImrA== dependencies: - "@ampproject/toolbox-core" "^2.2.0" - "@ampproject/toolbox-runtime-version" "^2.2.0" - "@ampproject/toolbox-script-csp" "^2.2.0" - "@ampproject/toolbox-validator-rules" "^2.2.0" + "@ampproject/toolbox-core" "^2.4.0-alpha.1" + "@ampproject/toolbox-runtime-version" "^2.4.0-alpha.1" + "@ampproject/toolbox-script-csp" "^2.3.0" + "@ampproject/toolbox-validator-rules" "^2.3.0" cssnano "4.1.10" domhandler "3.0.0" - domutils "2.0.0" + domutils "2.1.0" htmlparser2 "4.1.0" + lru-cache "5.1.1" normalize-html-whitespace "1.0.0" postcss-safe-parser "4.0.2" - terser "4.6.8" + terser "4.6.13" -"@ampproject/toolbox-runtime-version@^2.2.0": - version 
"2.3.1" - resolved "https://registry.yarnpkg.com/@ampproject/toolbox-runtime-version/-/toolbox-runtime-version-2.3.1.tgz#c1d0d937a7474b958cb8e713ea8be00e7bc724f4" - integrity sha512-ocDCSaSUlbgPbuXRVyLU7k+dLGYluKrnfSPKXIwwpGkwNw58JOddvgnQGyB0R/2Ma36nxFA8AAyxjnghLSScpg== +"@ampproject/toolbox-runtime-version@^2.4.0-alpha.1": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@ampproject/toolbox-runtime-version/-/toolbox-runtime-version-2.5.0.tgz#671823d749121ffb1b745912a8c2fc8dce27da80" + integrity sha512-mDeHgbxkBag1L/HsH3WhA7rRqoq3H7iiqZ8g/1Mvre4wP1YuN2iOjM/8EJvBJ4JM+UQsu3Kyljc88Mf8FHkSmg== dependencies: - "@ampproject/toolbox-core" "^2.3.0" + "@ampproject/toolbox-core" "^2.5.0" -"@ampproject/toolbox-script-csp@^2.2.0": +"@ampproject/toolbox-script-csp@^2.3.0": version "2.3.0" resolved "https://registry.yarnpkg.com/@ampproject/toolbox-script-csp/-/toolbox-script-csp-2.3.0.tgz#374cd0bf69bfdd0f1784064d0de69162722c89af" integrity sha512-Qba53ohvCH79sYl5O8K5GMSo/372OjuyxNc+XySG26sAsG26WpBKJEE0HTr8rsa//CD3Fc92FieT1gK5U/jK4Q== -"@ampproject/toolbox-validator-rules@^2.2.0": +"@ampproject/toolbox-validator-rules@^2.3.0": version "2.3.0" resolved "https://registry.yarnpkg.com/@ampproject/toolbox-validator-rules/-/toolbox-validator-rules-2.3.0.tgz#047d8a8106ba777f1df308c19f1c1c41ffea4054" integrity sha512-S10YIyOKettoRDWoyRymRyjzWZD4/qW7YfHNhHAS13QVneabRcU5MF7vEwkG6dHWx/UdufT5GbqYnvpQRMNt3Q== dependencies: cross-fetch "3.0.4" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.5.5", "@babel/code-frame@^7.8.3": +"@babel/code-frame@7.8.3", "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.5.5", "@babel/code-frame@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.8.3.tgz#33e25903d7481181534e12ec0a25f16b6fcf419e" integrity sha512-a9gxpmdXtZEInkCSHUJDLHZVBgb1QS0jhss4cPP93EW7s+uC5bikET2twEF3KV+7rDblJcmNvTR7VJejqd2C2g== dependencies: "@babel/highlight" "^7.8.3" -"@babel/core@7.7.2": - version "7.7.2" - resolved 
"https://registry.yarnpkg.com/@babel/core/-/core-7.7.2.tgz#ea5b99693bcfc058116f42fa1dd54da412b29d91" - integrity sha512-eeD7VEZKfhK1KUXGiyPFettgF3m513f8FoBSWiQ1xTvl1RAopLs42Wp9+Ze911I6H0N9lNqJMDgoZT7gHsipeQ== +"@babel/code-frame@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.1.tgz#d5481c5095daa1c57e16e54c6f9198443afb49ff" + integrity sha512-IGhtTmpjGbYzcEDOw7DcQtbQSXcG9ftmAXtWTu9V936vDye4xjjekktFAtgZsWpzTj/X01jocB46mTywm/4SZw== dependencies: - "@babel/code-frame" "^7.5.5" - "@babel/generator" "^7.7.2" - "@babel/helpers" "^7.7.0" - "@babel/parser" "^7.7.2" - "@babel/template" "^7.7.0" - "@babel/traverse" "^7.7.2" - "@babel/types" "^7.7.2" + "@babel/highlight" "^7.10.1" + +"@babel/compat-data@^7.9.6": + version "7.9.6" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.9.6.tgz#3f604c40e420131affe6f2c8052e9a275ae2049b" + integrity sha512-5QPTrNen2bm7RBc7dsOmcA5hbrS4O2Vhmk5XOL4zWW/zD/hV0iinpefDlkm+tBBy8kDtFaaeEvmAqt+nURAV2g== + dependencies: + browserslist "^4.11.1" + invariant "^2.2.4" + semver "^5.5.0" + +"@babel/core@7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.10.2.tgz#bd6786046668a925ac2bd2fd95b579b92a23b36a" + integrity sha512-KQmV9yguEjQsXqyOUGKjS4+3K8/DlOCE2pZcq4augdQmtTy5iv5EHtmMSJ7V4c1BIPjuwtZYqYLCq9Ga+hGBRQ== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/generator" "^7.10.2" + "@babel/helper-module-transforms" "^7.10.1" + "@babel/helpers" "^7.10.1" + "@babel/parser" "^7.10.2" + "@babel/template" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.2" convert-source-map "^1.7.0" debug "^4.1.0" - json5 "^2.1.0" + gensync "^1.0.0-beta.1" + json5 "^2.1.2" lodash "^4.17.13" resolve "^1.3.2" semver "^5.4.1" source-map "^0.5.0" -"@babel/core@7.9.6": - version "7.9.6" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.9.6.tgz#d9aa1f580abf3b2286ef40b6904d390904c63376" - integrity 
sha512-nD3deLvbsApbHAHttzIssYqgb883yU/d9roe4RZymBCDaZryMJDbptVpEpeQuRh4BJ+SYI8le9YGxKvFEvl1Wg== +"@babel/core@7.7.7": + version "7.7.7" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.7.7.tgz#ee155d2e12300bcc0cff6a8ad46f2af5063803e9" + integrity sha512-jlSjuj/7z138NLZALxVgrx13AOtqip42ATZP7+kYl53GvDV6+4dCek1mVUo8z8c8Xnw/mx2q3d9HWh3griuesQ== dependencies: - "@babel/code-frame" "^7.8.3" - "@babel/generator" "^7.9.6" - "@babel/helper-module-transforms" "^7.9.0" - "@babel/helpers" "^7.9.6" - "@babel/parser" "^7.9.6" - "@babel/template" "^7.8.6" - "@babel/traverse" "^7.9.6" - "@babel/types" "^7.9.6" + "@babel/code-frame" "^7.5.5" + "@babel/generator" "^7.7.7" + "@babel/helpers" "^7.7.4" + "@babel/parser" "^7.7.7" + "@babel/template" "^7.7.4" + "@babel/traverse" "^7.7.4" + "@babel/types" "^7.7.4" convert-source-map "^1.7.0" debug "^4.1.0" - gensync "^1.0.0-beta.1" - json5 "^2.1.2" + json5 "^2.1.0" lodash "^4.17.13" resolve "^1.3.2" semver "^5.4.1" source-map "^0.5.0" -"@babel/generator@^7.7.2", "@babel/generator@^7.9.6": +"@babel/generator@^7.10.1", "@babel/generator@^7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.10.2.tgz#0fa5b5b2389db8bfdfcc3492b551ee20f5dd69a9" + integrity sha512-AxfBNHNu99DTMvlUPlt1h2+Hn7knPpH5ayJ8OqDWSeLld+Fi2AYBTC/IejWDM9Edcii4UzZRCsbUt0WlSDsDsA== + dependencies: + "@babel/types" "^7.10.2" + jsesc "^2.5.1" + lodash "^4.17.13" + source-map "^0.5.0" + +"@babel/generator@^7.7.7", "@babel/generator@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.9.6.tgz#5408c82ac5de98cda0d77d8124e99fa1f2170a43" integrity sha512-+htwWKJbH2bL72HRluF8zumBxzuX0ZZUFl3JLNyoUjM/Ho8wnVpPXM6aUz8cfKDqQ/h7zHqKt4xzJteUosckqQ== @@ -136,7 +164,18 @@ "@babel/helper-annotate-as-pure" "^7.8.3" "@babel/types" "^7.9.0" -"@babel/helper-create-class-features-plugin@^7.7.0", "@babel/helper-create-class-features-plugin@^7.9.6": +"@babel/helper-compilation-targets@^7.9.6": + 
version "7.9.6" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.9.6.tgz#1e05b7ccc9d38d2f8b40b458b380a04dcfadd38a" + integrity sha512-x2Nvu0igO0ejXzx09B/1fGBxY9NXQlBW2kZsSxCJft+KHN8t9XWzIvFxtPHnBOAXpVsdxZKZFbRUC8TsNKajMw== + dependencies: + "@babel/compat-data" "^7.9.6" + browserslist "^4.11.1" + invariant "^2.2.4" + levenary "^1.1.1" + semver "^5.5.0" + +"@babel/helper-create-class-features-plugin@^7.8.3", "@babel/helper-create-class-features-plugin@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.9.6.tgz#965c8b0a9f051801fd9d3b372ca0ccf200a90897" integrity sha512-6N9IeuyHvMBRyjNYOMJHrhwtu4WJMrYf8hVbEHD3pbbbmNOk1kmXSQs7bA4dYDUaIx4ZEzdnvo6NwC3WHd/Qow== @@ -174,6 +213,15 @@ "@babel/traverse" "^7.8.3" "@babel/types" "^7.8.3" +"@babel/helper-function-name@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.10.1.tgz#92bd63829bfc9215aca9d9defa85f56b539454f4" + integrity sha512-fcpumwhs3YyZ/ttd5Rz0xn0TpIwVkN7X0V38B9TWNfVF42KEkhkAAuPCQ3oXmtTRtiPJrmZ0TrfS0GKF0eMaRQ== + dependencies: + "@babel/helper-get-function-arity" "^7.10.1" + "@babel/template" "^7.10.1" + "@babel/types" "^7.10.1" + "@babel/helper-function-name@^7.8.3", "@babel/helper-function-name@^7.9.5": version "7.9.5" resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.9.5.tgz#2b53820d35275120e1874a82e5aabe1376920a5c" @@ -183,6 +231,13 @@ "@babel/template" "^7.8.3" "@babel/types" "^7.9.5" +"@babel/helper-get-function-arity@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.10.1.tgz#7303390a81ba7cb59613895a192b93850e373f7d" + integrity sha512-F5qdXkYGOQUb0hpRaPoetF9AnsXknKjWMZ+wmsIRsp5ge5sFh4c3h1eH2pRTTuy9KKAA2+TTYomGXAtEL2fQEw== + dependencies: + "@babel/types" "^7.10.1" + 
"@babel/helper-get-function-arity@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.8.3.tgz#b894b947bd004381ce63ea1db9f08547e920abd5" @@ -197,6 +252,13 @@ dependencies: "@babel/types" "^7.8.3" +"@babel/helper-member-expression-to-functions@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.10.1.tgz#432967fd7e12a4afef66c4687d4ca22bc0456f15" + integrity sha512-u7XLXeM2n50gb6PWJ9hoO5oO7JFPaZtrh35t8RqKLT1jFKj9IWeD1zrcrYp1q1qiZTdEarfDWfTIP8nGsu0h5g== + dependencies: + "@babel/types" "^7.10.1" + "@babel/helper-member-expression-to-functions@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.8.3.tgz#659b710498ea6c1d9907e0c73f206eee7dadc24c" @@ -204,14 +266,34 @@ dependencies: "@babel/types" "^7.8.3" -"@babel/helper-module-imports@^7.0.0", "@babel/helper-module-imports@^7.7.0", "@babel/helper-module-imports@^7.8.3": +"@babel/helper-module-imports@^7.0.0", "@babel/helper-module-imports@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.8.3.tgz#7fe39589b39c016331b6b8c3f441e8f0b1419498" integrity sha512-R0Bx3jippsbAEtzkpZ/6FIiuzOURPcMjHp+Z6xPe6DtApDJx+w7UYyOLanZqO8+wKR9G10s/FmHXvxaMd9s6Kg== dependencies: "@babel/types" "^7.8.3" -"@babel/helper-module-transforms@^7.7.0", "@babel/helper-module-transforms@^7.9.0": +"@babel/helper-module-imports@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.10.1.tgz#dd331bd45bccc566ce77004e9d05fe17add13876" + integrity sha512-SFxgwYmZ3HZPyZwJRiVNLRHWuW2OgE5k2nrVs6D9Iv4PPnXVffuEHy83Sfx/l4SqF+5kyJXjAyUmrG7tNm+qVg== + dependencies: + "@babel/types" "^7.10.1" + +"@babel/helper-module-transforms@^7.10.1": + version "7.10.1" + 
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.10.1.tgz#24e2f08ee6832c60b157bb0936c86bef7210c622" + integrity sha512-RLHRCAzyJe7Q7sF4oy2cB+kRnU4wDZY/H2xJFGof+M+SJEGhZsb+GFj5j1AD8NiSaVBJ+Pf0/WObiXu/zxWpFg== + dependencies: + "@babel/helper-module-imports" "^7.10.1" + "@babel/helper-replace-supers" "^7.10.1" + "@babel/helper-simple-access" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + "@babel/template" "^7.10.1" + "@babel/types" "^7.10.1" + lodash "^4.17.13" + +"@babel/helper-module-transforms@^7.9.0": version "7.9.0" resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.9.0.tgz#43b34dfe15961918707d247327431388e9fe96e5" integrity sha512-0FvKyu0gpPfIQ8EkxlrAydOWROdHpBmiCiRwLkUiBGhCUPRRbVD2/tm3sFr/c/GWFrQ/ffutGUAnx7V0FzT2wA== @@ -224,6 +306,13 @@ "@babel/types" "^7.9.0" lodash "^4.17.13" +"@babel/helper-optimise-call-expression@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.10.1.tgz#b4a1f2561870ce1247ceddb02a3860fa96d72543" + integrity sha512-a0DjNS1prnBsoKx83dP2falChcs7p3i8VMzdrSbfLhuQra/2ENC4sbri34dz/rWmDADsmF1q5GbfaXydh0Jbjg== + dependencies: + "@babel/types" "^7.10.1" + "@babel/helper-optimise-call-expression@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.8.3.tgz#7ed071813d09c75298ef4f208956006b6111ecb9" @@ -254,6 +343,16 @@ "@babel/traverse" "^7.8.3" "@babel/types" "^7.8.3" +"@babel/helper-replace-supers@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.10.1.tgz#ec6859d20c5d8087f6a2dc4e014db7228975f13d" + integrity sha512-SOwJzEfpuQwInzzQJGjGaiG578UYmyi2Xw668klPWV5n07B73S0a9btjLk/52Mlcxa+5AdIYqws1KyXRfMoB7A== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.10.1" + 
"@babel/helper-optimise-call-expression" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.1" + "@babel/helper-replace-supers@^7.8.3", "@babel/helper-replace-supers@^7.8.6", "@babel/helper-replace-supers@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.9.6.tgz#03149d7e6a5586ab6764996cd31d6981a17e1444" @@ -264,7 +363,15 @@ "@babel/traverse" "^7.9.6" "@babel/types" "^7.9.6" -"@babel/helper-simple-access@^7.7.0", "@babel/helper-simple-access@^7.8.3": +"@babel/helper-simple-access@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.10.1.tgz#08fb7e22ace9eb8326f7e3920a1c2052f13d851e" + integrity sha512-VSWpWzRzn9VtgMJBIWTZ+GP107kZdQ4YplJlCmIrjoLVSi/0upixezHCDG8kpPVTBJpKfxTH01wDhh+jS2zKbw== + dependencies: + "@babel/template" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helper-simple-access@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.8.3.tgz#7f8109928b4dab4654076986af575231deb639ae" integrity sha512-VNGUDjx5cCWg4vvCTR8qQ7YJYZ+HBjxOgXEl7ounz+4Sn7+LMD3CFrCTEU6/qXKbA2nKg21CwhhBzO0RpRbdCw== @@ -272,6 +379,13 @@ "@babel/template" "^7.8.3" "@babel/types" "^7.8.3" +"@babel/helper-split-export-declaration@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.10.1.tgz#c6f4be1cbc15e3a868e4c64a17d5d31d754da35f" + integrity sha512-UQ1LVBPrYdbchNhLwj6fetj46BcFwfS4NllJo/1aJsT+1dLTEnXJL0qHqtY7gPzF8S2fXBJamf1biAXV3X077g== + dependencies: + "@babel/types" "^7.10.1" + "@babel/helper-split-export-declaration@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.8.3.tgz#31a9f30070f91368a7182cf05f831781065fc7a9" @@ -279,6 +393,11 @@ dependencies: "@babel/types" "^7.8.3" 
+"@babel/helper-validator-identifier@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.1.tgz#5770b0c1a826c4f53f5ede5e153163e0318e94b5" + integrity sha512-5vW/JXLALhczRCWP0PnFDMCJAchlBvM7f4uk/jXritBnIa6E1KmqmtrS3yn1LAnxFBypQ3eneLuXjsnfQsgILw== + "@babel/helper-validator-identifier@^7.9.0", "@babel/helper-validator-identifier@^7.9.5": version "7.9.5" resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.9.5.tgz#90977a8e6fbf6b431a7dc31752eee233bf052d80" @@ -294,7 +413,16 @@ "@babel/traverse" "^7.8.3" "@babel/types" "^7.8.3" -"@babel/helpers@^7.7.0", "@babel/helpers@^7.9.6": +"@babel/helpers@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.10.1.tgz#a6827b7cb975c9d9cef5fd61d919f60d8844a973" + integrity sha512-muQNHF+IdU6wGgkaJyhhEmI54MOZBKsFfsXFhboz1ybwJ1Kl7IHlbm2a++4jwrmY5UYsgitt5lfqo1wMFcHmyw== + dependencies: + "@babel/template" "^7.10.1" + "@babel/traverse" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/helpers@^7.7.4": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.9.6.tgz#092c774743471d0bb6c7de3ad465ab3d3486d580" integrity sha512-tI4bUbldloLcHWoRUMAj4g1bF313M/o6fBKhIsb3QnGVPwRm9JsNf/gqMkQ7zjqReABiffPV6RWj7hEglID5Iw== @@ -303,6 +431,15 @@ "@babel/traverse" "^7.9.6" "@babel/types" "^7.9.6" +"@babel/highlight@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.1.tgz#841d098ba613ba1a427a2b383d79e35552c38ae0" + integrity sha512-8rMof+gVP8mxYZApLF/JgNDAkdKa+aJt3ZYxF8z6+j/hpeXL7iMsKCPHa2jNMHu/qqBwzQF4OHNoYi8dMA/rYg== + dependencies: + "@babel/helper-validator-identifier" "^7.10.1" + chalk "^2.0.0" + js-tokens "^4.0.0" + "@babel/highlight@^7.8.3": version "7.9.0" resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.9.0.tgz#4e9b45ccb82b79607271b2979ad82c7b68163079" @@ -312,12 +449,17 
@@ chalk "^2.0.0" js-tokens "^4.0.0" -"@babel/parser@^7.7.2", "@babel/parser@^7.8.6", "@babel/parser@^7.9.6": +"@babel/parser@^7.10.1", "@babel/parser@^7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.10.2.tgz#871807f10442b92ff97e4783b9b54f6a0ca812d0" + integrity sha512-PApSXlNMJyB4JiGVhCOlzKIif+TKFTvu0aQAhnTvfP/z3vVSN6ZypH5bfUNwFXXjRQtUEBNFd2PtmCmG2Py3qQ== + +"@babel/parser@^7.7.7", "@babel/parser@^7.8.6", "@babel/parser@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.9.6.tgz#3b1bbb30dabe600cd72db58720998376ff653bc7" integrity sha512-AoeIEJn8vt+d/6+PXDRPaksYhnlbMIiejioBZvvMQsOjW/JYK6k/0dKnvvP3EhK5GfMBWDPtrxRtegWdAcdq9Q== -"@babel/plugin-proposal-async-generator-functions@^7.7.0": +"@babel/plugin-proposal-async-generator-functions@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.8.3.tgz#bad329c670b382589721b27540c7d288601c6e6f" integrity sha512-NZ9zLv848JsV3hs8ryEh7Uaz/0KsmPLqv0+PdkDJL1cJy0K4kOCFa8zc1E3mp+RHPQcpdfb/6GovEsW4VDrOMw== @@ -326,15 +468,15 @@ "@babel/helper-remap-async-to-generator" "^7.8.3" "@babel/plugin-syntax-async-generators" "^7.8.0" -"@babel/plugin-proposal-class-properties@7.7.0": - version "7.7.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.7.0.tgz#ac54e728ecf81d90e8f4d2a9c05a890457107917" - integrity sha512-tufDcFA1Vj+eWvwHN+jvMN6QsV5o+vUlytNKrbMiCeDL0F2j92RURzUsUMWE5EJkLyWxjdUslCsMQa9FWth16A== +"@babel/plugin-proposal-class-properties@7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.8.3.tgz#5e06654af5cd04b608915aada9b2a6788004464e" + integrity sha512-EqFhbo7IosdgPgZggHaNObkmO1kNUe3slaKu54d5OWvy+p9QIKOzK1GAEpAIsZtWVtPXUHSMcT4smvDrCfY4AA== dependencies: - 
"@babel/helper-create-class-features-plugin" "^7.7.0" - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-create-class-features-plugin" "^7.8.3" + "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-proposal-dynamic-import@^7.7.0": +"@babel/plugin-proposal-dynamic-import@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.8.3.tgz#38c4fe555744826e97e2ae930b0fb4cc07e66054" integrity sha512-NyaBbyLFXFLT9FP+zk0kYlUlA8XtCUbehs67F0nnEg7KICgMc2mNkIeu9TYhKzyXMkrapZFwAhXLdnt4IYHy1w== @@ -342,7 +484,7 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-dynamic-import" "^7.8.0" -"@babel/plugin-proposal-json-strings@^7.2.0": +"@babel/plugin-proposal-json-strings@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.8.3.tgz#da5216b238a98b58a1e05d6852104b10f9a70d6b" integrity sha512-KGhQNZ3TVCQG/MjRbAUwuH+14y9q0tpxs1nWWs3pbSleRdDro9SAMMDyye8HhY1gqZ7/NqIc8SKhya0wRDgP1Q== @@ -350,15 +492,15 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-json-strings" "^7.8.0" -"@babel/plugin-proposal-nullish-coalescing-operator@7.7.4": - version "7.7.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.7.4.tgz#7db302c83bc30caa89e38fee935635ef6bd11c28" - integrity sha512-TbYHmr1Gl1UC7Vo2HVuj/Naci5BEGNZ0AJhzqD2Vpr6QPFWpUmBRLrIDjedzx7/CShq0bRDS2gI4FIs77VHLVQ== +"@babel/plugin-proposal-nullish-coalescing-operator@7.8.3", "@babel/plugin-proposal-nullish-coalescing-operator@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.8.3.tgz#e4572253fdeed65cddeecfdab3f928afeb2fd5d2" + integrity sha512-TS9MlfzXpXKt6YYomudb/KU7nQI6/xnapG6in1uZxoxDghuSMZsPb6D2fyUwNYSAp4l1iR7QtFOjkqcRYcUsfw== dependencies: - 
"@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.7.4" + "@babel/helper-plugin-utils" "^7.8.3" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.0" -"@babel/plugin-proposal-numeric-separator@7.8.3": +"@babel/plugin-proposal-numeric-separator@7.8.3", "@babel/plugin-proposal-numeric-separator@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.8.3.tgz#5d6769409699ec9b3b68684cd8116cedff93bad8" integrity sha512-jWioO1s6R/R+wEHizfaScNsAx+xKgwTLNXSh7tTC4Usj3ItsPEhYkEpU4h+lpnBwq7NBVOJXfO6cRFYcX69JUQ== @@ -366,15 +508,7 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-numeric-separator" "^7.8.3" -"@babel/plugin-proposal-object-rest-spread@7.6.2": - version "7.6.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.6.2.tgz#8ffccc8f3a6545e9f78988b6bf4fe881b88e8096" - integrity sha512-LDBXlmADCsMZV1Y9OQwMc0MyGZ8Ta/zlD9N67BfQT8uYwkRswiu2hU6nJKrjrt/58aH/vqfQlR/9yId/7A2gWw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" - -"@babel/plugin-proposal-object-rest-spread@^7.6.2": +"@babel/plugin-proposal-object-rest-spread@7.9.6", "@babel/plugin-proposal-object-rest-spread@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.9.6.tgz#7a093586fcb18b08266eb1a7177da671ac575b63" integrity sha512-Ga6/fhGqA9Hj+y6whNpPv8psyaK5xzrQwSPsGPloVkvmH+PqW1ixdnfJ9uIO06OjQNYol3PMnfmJ8vfZtkzF+A== @@ -383,7 +517,7 @@ "@babel/plugin-syntax-object-rest-spread" "^7.8.0" "@babel/plugin-transform-parameters" "^7.9.5" -"@babel/plugin-proposal-optional-catch-binding@^7.2.0": +"@babel/plugin-proposal-optional-catch-binding@^7.8.3": version "7.8.3" resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.8.3.tgz#9dee96ab1650eed88646ae9734ca167ac4a9c5c9" integrity sha512-0gkX7J7E+AtAw9fcwlVQj8peP61qhdg/89D5swOkjYbkboA2CVckn3kiyum1DE0wskGb7KJJxBdyEBApDLLVdw== @@ -391,15 +525,15 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-optional-catch-binding" "^7.8.0" -"@babel/plugin-proposal-optional-chaining@7.7.4": - version "7.7.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.7.4.tgz#3f04c2de1a942cbd3008324df8144b9cbc0ca0ba" - integrity sha512-JmgaS+ygAWDR/STPe3/7y0lNlHgS+19qZ9aC06nYLwQ/XB7c0q5Xs+ksFU3EDnp9EiEsO0dnRAOKeyLHTZuW3A== +"@babel/plugin-proposal-optional-chaining@7.9.0", "@babel/plugin-proposal-optional-chaining@^7.9.0": + version "7.9.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.9.0.tgz#31db16b154c39d6b8a645292472b98394c292a58" + integrity sha512-NDn5tu3tcv4W30jNhmc2hyD5c56G6cXx4TesJubhxrJeCvuuMpttxr0OnNCqbZGhFjLrg+NIhxxC+BK5F6yS3w== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-optional-chaining" "^7.7.4" + "@babel/helper-plugin-utils" "^7.8.3" + "@babel/plugin-syntax-optional-chaining" "^7.8.0" -"@babel/plugin-proposal-unicode-property-regex@^7.4.4", "@babel/plugin-proposal-unicode-property-regex@^7.7.0": +"@babel/plugin-proposal-unicode-property-regex@^7.4.4", "@babel/plugin-proposal-unicode-property-regex@^7.8.3": version "7.8.8" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.8.8.tgz#ee3a95e90cdc04fe8cd92ec3279fa017d68a0d1d" integrity sha512-EVhjVsMpbhLw9ZfHWSx2iy13Q8Z/eg8e8ccVWt23sWQK5l1UdkoLJPN5w69UA4uITGBnEZD2JOe4QOHycYKv8A== @@ -407,7 +541,7 @@ "@babel/helper-create-regexp-features-plugin" "^7.8.8" "@babel/helper-plugin-utils" "^7.8.3" 
-"@babel/plugin-syntax-async-generators@^7.2.0", "@babel/plugin-syntax-async-generators@^7.8.0": +"@babel/plugin-syntax-async-generators@^7.8.0": version "7.8.4" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== @@ -421,21 +555,14 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-dynamic-import@7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.2.0.tgz#69c159ffaf4998122161ad8ebc5e6d1f55df8612" - integrity sha512-mVxuJ0YroI/h/tbFTPGZR8cv6ai+STMKNBq0f8hFxsxWjl94qqhsb+wXbpNMDPU3cfR1TIsVFzU3nXyZMqyK4w== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-dynamic-import@^7.2.0", "@babel/plugin-syntax-dynamic-import@^7.8.0": +"@babel/plugin-syntax-dynamic-import@7.8.3", "@babel/plugin-syntax-dynamic-import@^7.8.0": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz#62bf98b2da3cd21d626154fc96ee5b3cb68eacb3" integrity sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ== dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-json-strings@^7.2.0", "@babel/plugin-syntax-json-strings@^7.8.0": +"@babel/plugin-syntax-json-strings@^7.8.0": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== @@ -449,42 +576,42 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-syntax-nullish-coalescing-operator@^7.7.4": 
+"@babel/plugin-syntax-nullish-coalescing-operator@^7.8.0": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-numeric-separator@^7.8.3": +"@babel/plugin-syntax-numeric-separator@^7.8.0", "@babel/plugin-syntax-numeric-separator@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.8.3.tgz#0e3fb63e09bea1b11e96467271c8308007e7c41f" integrity sha512-H7dCMAdN83PcCmqmkHB5dtp+Xa9a6LKSvA2hiFBC/5alSHxM5VgWZXFqDi0YFe8XNGT6iCa+z4V4zSt/PdZ7Dw== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-syntax-object-rest-spread@^7.2.0", "@babel/plugin-syntax-object-rest-spread@^7.8.0": +"@babel/plugin-syntax-object-rest-spread@^7.8.0": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-optional-catch-binding@^7.2.0", "@babel/plugin-syntax-optional-catch-binding@^7.8.0": +"@babel/plugin-syntax-optional-catch-binding@^7.8.0": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-optional-chaining@^7.7.4": 
+"@babel/plugin-syntax-optional-chaining@^7.8.0": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" integrity sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-top-level-await@^7.7.0": +"@babel/plugin-syntax-top-level-await@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.8.3.tgz#3acdece695e6b13aaf57fc291d1a800950c71391" integrity sha512-kwj1j9lL/6Wd0hROD3b/OZZ7MSrZLqqn9RAZ5+cYYsflQ9HZBIKCUkr3+uL1MEJ1NePiUbf98jjiMQSv0NMR9g== @@ -498,14 +625,14 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-arrow-functions@^7.2.0": +"@babel/plugin-transform-arrow-functions@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.8.3.tgz#82776c2ed0cd9e1a49956daeb896024c9473b8b6" integrity sha512-0MRF+KC8EqH4dbuITCWwPSzsyO3HIWWlm30v8BbbpOrS1B++isGxPnnuq/IZvOX5J2D/p7DQalQm+/2PnlKGxg== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-async-to-generator@^7.7.0": +"@babel/plugin-transform-async-to-generator@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.8.3.tgz#4308fad0d9409d71eafb9b1a6ee35f9d64b64086" integrity sha512-imt9tFLD9ogt56Dd5CI/6XgpukMwd/fLGSrix2httihVe7LOGVPhyhMh1BU5kDM7iHD08i8uUtmV2sWaBFlHVQ== @@ -514,14 +641,14 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/helper-remap-async-to-generator" "^7.8.3" -"@babel/plugin-transform-block-scoped-functions@^7.2.0": +"@babel/plugin-transform-block-scoped-functions@^7.8.3": version "7.8.3" resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.8.3.tgz#437eec5b799b5852072084b3ae5ef66e8349e8a3" integrity sha512-vo4F2OewqjbB1+yaJ7k2EJFHlTP3jR634Z9Cj9itpqNjuLXvhlVxgnjsHsdRgASR8xYDrx6onw4vW5H6We0Jmg== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-block-scoping@^7.6.3": +"@babel/plugin-transform-block-scoping@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.8.3.tgz#97d35dab66857a437c166358b91d09050c868f3a" integrity sha512-pGnYfm7RNRgYRi7bids5bHluENHqJhrV4bCZRwc5GamaWIIs07N4rZECcmJL6ZClwjDz1GbdMZFtPs27hTB06w== @@ -529,7 +656,7 @@ "@babel/helper-plugin-utils" "^7.8.3" lodash "^4.17.13" -"@babel/plugin-transform-classes@^7.7.0": +"@babel/plugin-transform-classes@^7.9.5": version "7.9.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.9.5.tgz#800597ddb8aefc2c293ed27459c1fcc935a26c2c" integrity sha512-x2kZoIuLC//O5iA7PEvecB105o7TLzZo8ofBVhP79N+DO3jaX+KYfww9TQcfBEZD0nikNyYcGB1IKtRq36rdmg== @@ -543,21 +670,21 @@ "@babel/helper-split-export-declaration" "^7.8.3" globals "^11.1.0" -"@babel/plugin-transform-computed-properties@^7.2.0": +"@babel/plugin-transform-computed-properties@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.8.3.tgz#96d0d28b7f7ce4eb5b120bb2e0e943343c86f81b" integrity sha512-O5hiIpSyOGdrQZRQ2ccwtTVkgUDBBiCuK//4RJ6UfePllUTCENOzKxfh6ulckXKc0DixTFLCfb2HVkNA7aDpzA== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-destructuring@^7.6.0": +"@babel/plugin-transform-destructuring@^7.9.5": version "7.9.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.9.5.tgz#72c97cf5f38604aea3abf3b935b0e17b1db76a50" integrity 
sha512-j3OEsGel8nHL/iusv/mRd5fYZ3DrOxWC82x0ogmdN/vHfAP4MYw+AFKYanzWlktNwikKvlzUV//afBW5FTp17Q== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-dotall-regex@^7.4.4", "@babel/plugin-transform-dotall-regex@^7.7.0": +"@babel/plugin-transform-dotall-regex@^7.4.4", "@babel/plugin-transform-dotall-regex@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.8.3.tgz#c3c6ec5ee6125c6993c5cbca20dc8621a9ea7a6e" integrity sha512-kLs1j9Nn4MQoBYdRXH6AeaXMbEJFaFu/v1nQkvib6QzTj8MZI5OQzqmD83/2jEM1z0DLilra5aWO5YpyC0ALIw== @@ -565,14 +692,14 @@ "@babel/helper-create-regexp-features-plugin" "^7.8.3" "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-duplicate-keys@^7.5.0": +"@babel/plugin-transform-duplicate-keys@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.8.3.tgz#8d12df309aa537f272899c565ea1768e286e21f1" integrity sha512-s8dHiBUbcbSgipS4SMFuWGqCvyge5V2ZeAWzR6INTVC3Ltjig/Vw1G2Gztv0vU/hRG9X8IvKvYdoksnUfgXOEQ== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-exponentiation-operator@^7.2.0": +"@babel/plugin-transform-exponentiation-operator@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.8.3.tgz#581a6d7f56970e06bf51560cd64f5e947b70d7b7" integrity sha512-zwIpuIymb3ACcInbksHaNcR12S++0MDLKkiqXHl3AzpgdKlFNhog+z/K0+TGW+b0w5pgTq4H6IwV/WhxbGYSjQ== @@ -580,14 +707,14 @@ "@babel/helper-builder-binary-assignment-operator-visitor" "^7.8.3" "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-for-of@^7.4.4": +"@babel/plugin-transform-for-of@^7.9.0": version "7.9.0" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.9.0.tgz#0f260e27d3e29cd1bb3128da5e76c761aa6c108e" integrity 
sha512-lTAnWOpMwOXpyDx06N+ywmF3jNbafZEqZ96CGYabxHrxNX8l5ny7dt4bK/rGwAh9utyP2b2Hv7PlZh1AAS54FQ== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-function-name@^7.7.0": +"@babel/plugin-transform-function-name@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.8.3.tgz#279373cb27322aaad67c2683e776dfc47196ed8b" integrity sha512-rO/OnDS78Eifbjn5Py9v8y0aR+aSYhDhqAwVfsTl0ERuMZyr05L1aFSCJnbv2mmsLkit/4ReeQ9N2BgLnOcPCQ== @@ -595,21 +722,21 @@ "@babel/helper-function-name" "^7.8.3" "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-literals@^7.2.0": +"@babel/plugin-transform-literals@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.8.3.tgz#aef239823d91994ec7b68e55193525d76dbd5dc1" integrity sha512-3Tqf8JJ/qB7TeldGl+TT55+uQei9JfYaregDcEAyBZ7akutriFrt6C/wLYIer6OYhleVQvH/ntEhjE/xMmy10A== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-member-expression-literals@^7.2.0": +"@babel/plugin-transform-member-expression-literals@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.8.3.tgz#963fed4b620ac7cbf6029c755424029fa3a40410" integrity sha512-3Wk2EXhnw+rP+IDkK6BdtPKsUE5IeZ6QOGrPYvw52NwBStw9V1ZVzxgK6fSKSxqUvH9eQPR3tm3cOq79HlsKYA== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-modules-amd@^7.5.0": +"@babel/plugin-transform-modules-amd@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.9.6.tgz#8539ec42c153d12ea3836e0e3ac30d5aae7b258e" integrity sha512-zoT0kgC3EixAyIAU+9vfaUVKTv9IxBDSabgHoUCBP6FqEJ+iNiN7ip7NBKcYqbfUDfuC2mFCbM7vbu4qJgOnDw== @@ -618,17 +745,7 @@ "@babel/helper-plugin-utils" "^7.8.3" babel-plugin-dynamic-import-node 
"^2.3.3" -"@babel/plugin-transform-modules-commonjs@7.7.0": - version "7.7.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.7.0.tgz#3e5ffb4fd8c947feede69cbe24c9554ab4113fe3" - integrity sha512-KEMyWNNWnjOom8vR/1+d+Ocz/mILZG/eyHHO06OuBQ2aNhxT62fr4y6fGOplRx+CxCSp3IFwesL8WdINfY/3kg== - dependencies: - "@babel/helper-module-transforms" "^7.7.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-simple-access" "^7.7.0" - babel-plugin-dynamic-import-node "^2.3.0" - -"@babel/plugin-transform-modules-commonjs@^7.7.0": +"@babel/plugin-transform-modules-commonjs@7.9.6", "@babel/plugin-transform-modules-commonjs@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.9.6.tgz#64b7474a4279ee588cacd1906695ca721687c277" integrity sha512-7H25fSlLcn+iYimmsNe3uK1at79IE6SKW9q0/QeEHTMC9MdOZ+4bA+T1VFB5fgOqBWoqlifXRzYD0JPdmIrgSQ== @@ -638,7 +755,7 @@ "@babel/helper-simple-access" "^7.8.3" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-systemjs@^7.7.0": +"@babel/plugin-transform-modules-systemjs@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.9.6.tgz#207f1461c78a231d5337a92140e52422510d81a4" integrity sha512-NW5XQuW3N2tTHim8e1b7qGy7s0kZ2OH3m5octc49K1SdAKGxYxeIx7hiIz05kS1R2R+hOWcsr1eYwcGhrdHsrg== @@ -648,7 +765,7 @@ "@babel/helper-plugin-utils" "^7.8.3" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-umd@^7.7.0": +"@babel/plugin-transform-modules-umd@^7.9.0": version "7.9.0" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.9.0.tgz#e909acae276fec280f9b821a5f38e1f08b480697" integrity sha512-uTWkXkIVtg/JGRSIABdBoMsoIeoHQHPTL0Y2E7xf5Oj7sLqwVsNXOkNk0VJc7vF0IMBsPeikHxFjGe+qmwPtTQ== @@ -656,21 +773,21 @@ 
"@babel/helper-module-transforms" "^7.9.0" "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-named-capturing-groups-regex@^7.7.0": +"@babel/plugin-transform-named-capturing-groups-regex@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.8.3.tgz#a2a72bffa202ac0e2d0506afd0939c5ecbc48c6c" integrity sha512-f+tF/8UVPU86TrCb06JoPWIdDpTNSGGcAtaD9mLP0aYGA0OS0j7j7DHJR0GTFrUZPUU6loZhbsVZgTh0N+Qdnw== dependencies: "@babel/helper-create-regexp-features-plugin" "^7.8.3" -"@babel/plugin-transform-new-target@^7.4.4": +"@babel/plugin-transform-new-target@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.8.3.tgz#60cc2ae66d85c95ab540eb34babb6434d4c70c43" integrity sha512-QuSGysibQpyxexRyui2vca+Cmbljo8bcRckgzYV4kRIsHpVeyeC3JDO63pY+xFZ6bWOBn7pfKZTqV4o/ix9sFw== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-object-super@^7.5.5": +"@babel/plugin-transform-object-super@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.8.3.tgz#ebb6a1e7a86ffa96858bd6ac0102d65944261725" integrity sha512-57FXk+gItG/GejofIyLIgBKTas4+pEU47IXKDBWFTxdPd7F80H8zybyAY7UoblVfBhBGs2EKM+bJUu2+iUYPDQ== @@ -678,7 +795,7 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/helper-replace-supers" "^7.8.3" -"@babel/plugin-transform-parameters@^7.4.4", "@babel/plugin-transform-parameters@^7.9.5": +"@babel/plugin-transform-parameters@^7.9.5": version "7.9.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.9.5.tgz#173b265746f5e15b2afe527eeda65b73623a0795" integrity sha512-0+1FhHnMfj6lIIhVvS4KGQJeuhe1GI//h5uptK4PvLt+BGBxsoUJbd3/IW002yk//6sZPlFgsG1hY6OHLcy6kA== @@ -686,21 +803,30 @@ "@babel/helper-get-function-arity" "^7.8.3" "@babel/helper-plugin-utils" 
"^7.8.3" -"@babel/plugin-transform-property-literals@^7.2.0": +"@babel/plugin-transform-property-literals@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.8.3.tgz#33194300d8539c1ed28c62ad5087ba3807b98263" integrity sha512-uGiiXAZMqEoQhRWMK17VospMZh5sXWg+dlh2soffpkAl96KAm+WZuJfa6lcELotSRmooLqg0MWdH6UUq85nmmg== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-react-display-name@^7.0.0": +"@babel/plugin-transform-react-display-name@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.8.3.tgz#70ded987c91609f78353dd76d2fb2a0bb991e8e5" integrity sha512-3Jy/PCw8Fe6uBKtEgz3M82ljt+lTg+xJaM4og+eyu83qLT87ZUSckn0wy7r31jflURWLO83TW6Ylf7lyXj3m5A== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-react-jsx-self@^7.0.0": +"@babel/plugin-transform-react-jsx-development@^7.9.0": + version "7.9.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.9.0.tgz#3c2a130727caf00c2a293f0aed24520825dbf754" + integrity sha512-tK8hWKrQncVvrhvtOiPpKrQjfNX3DtkNLSX4ObuGcpS9p0QrGetKmlySIGR07y48Zft8WVgPakqd/bk46JrMSw== + dependencies: + "@babel/helper-builder-react-jsx-experimental" "^7.9.0" + "@babel/helper-plugin-utils" "^7.8.3" + "@babel/plugin-syntax-jsx" "^7.8.3" + +"@babel/plugin-transform-react-jsx-self@^7.9.0": version "7.9.0" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.9.0.tgz#f4f26a325820205239bb915bad8e06fcadabb49b" integrity sha512-K2ObbWPKT7KUTAoyjCsFilOkEgMvFG+y0FqOl6Lezd0/13kMkkjHskVsZvblRPj1PHA44PrToaZANrryppzTvQ== @@ -708,7 +834,7 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-jsx" "^7.8.3" -"@babel/plugin-transform-react-jsx-source@^7.0.0": 
+"@babel/plugin-transform-react-jsx-source@^7.9.0": version "7.9.0" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.9.0.tgz#89ef93025240dd5d17d3122294a093e5e0183de0" integrity sha512-K6m3LlSnTSfRkM6FcRk8saNEeaeyG5k7AVkBU2bZK3+1zdkSED3qNdsWrUgQBeTVD2Tp3VMmerxVO2yM5iITmw== @@ -716,7 +842,7 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-jsx" "^7.8.3" -"@babel/plugin-transform-react-jsx@^7.7.0": +"@babel/plugin-transform-react-jsx@^7.9.4": version "7.9.4" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.9.4.tgz#86f576c8540bd06d0e95e0b61ea76d55f6cbd03f" integrity sha512-Mjqf3pZBNLt854CK0C/kRuXAnE6H/bo7xYojP+WGtX8glDGSibcwnsWwhwoSuRg0+EBnxPC1ouVnuetUIlPSAw== @@ -726,45 +852,45 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-jsx" "^7.8.3" -"@babel/plugin-transform-regenerator@^7.7.0": +"@babel/plugin-transform-regenerator@^7.8.7": version "7.8.7" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.7.tgz#5e46a0dca2bee1ad8285eb0527e6abc9c37672f8" integrity sha512-TIg+gAl4Z0a3WmD3mbYSk+J9ZUH6n/Yc57rtKRnlA/7rcCvpekHXe0CMZHP1gYp7/KLe9GHTuIba0vXmls6drA== dependencies: regenerator-transform "^0.14.2" -"@babel/plugin-transform-reserved-words@^7.2.0": +"@babel/plugin-transform-reserved-words@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.8.3.tgz#9a0635ac4e665d29b162837dd3cc50745dfdf1f5" integrity sha512-mwMxcycN3omKFDjDQUl+8zyMsBfjRFr0Zn/64I41pmjv4NJuqcYlEtezwYtw9TFd9WR1vN5kiM+O0gMZzO6L0A== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-runtime@7.6.2": - version "7.6.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.6.2.tgz#2669f67c1fae0ae8d8bf696e4263ad52cb98b6f8" - integrity 
sha512-cqULw/QB4yl73cS5Y0TZlQSjDvNkzDbu0FurTZyHlJpWE5T3PCMdnyV+xXoH1opr1ldyHODe3QAX3OMAii5NxA== +"@babel/plugin-transform-runtime@7.9.6": + version "7.9.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.9.6.tgz#3ba804438ad0d880a17bca5eaa0cdf1edeedb2fd" + integrity sha512-qcmiECD0mYOjOIt8YHNsAP1SxPooC/rDmfmiSK9BNY72EitdSc7l44WTEklaWuFtbOEBjNhWWyph/kOImbNJ4w== dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" + "@babel/helper-module-imports" "^7.8.3" + "@babel/helper-plugin-utils" "^7.8.3" resolve "^1.8.1" semver "^5.5.1" -"@babel/plugin-transform-shorthand-properties@^7.2.0": +"@babel/plugin-transform-shorthand-properties@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.8.3.tgz#28545216e023a832d4d3a1185ed492bcfeac08c8" integrity sha512-I9DI6Odg0JJwxCHzbzW08ggMdCezoWcuQRz3ptdudgwaHxTjxw5HgdFJmZIkIMlRymL6YiZcped4TTCB0JcC8w== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-spread@^7.6.2": +"@babel/plugin-transform-spread@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.8.3.tgz#9c8ffe8170fdfb88b114ecb920b82fb6e95fe5e8" integrity sha512-CkuTU9mbmAoFOI1tklFWYYbzX5qCIZVXPVy0jpXgGwkplCndQAa58s2jr66fTeQnA64bDox0HL4U56CFYoyC7g== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-sticky-regex@^7.2.0": +"@babel/plugin-transform-sticky-regex@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.8.3.tgz#be7a1290f81dae767475452199e1f76d6175b100" integrity sha512-9Spq0vGCD5Bb4Z/ZXXSK5wbbLFMG085qd2vhL1JYu1WcQ5bXqZBAYRzU1d+p79GcHs2szYv5pVQCX13QgldaWw== @@ -772,7 +898,7 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/helper-regex" "^7.8.3" 
-"@babel/plugin-transform-template-literals@^7.4.4": +"@babel/plugin-transform-template-literals@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.8.3.tgz#7bfa4732b455ea6a43130adc0ba767ec0e402a80" integrity sha512-820QBtykIQOLFT8NZOcTRJ1UNuztIELe4p9DCgvj4NK+PwluSJ49we7s9FB1HIGNIYT7wFUJ0ar2QpCDj0escQ== @@ -780,14 +906,14 @@ "@babel/helper-annotate-as-pure" "^7.8.3" "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-typeof-symbol@^7.2.0": +"@babel/plugin-transform-typeof-symbol@^7.8.4": version "7.8.4" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.8.4.tgz#ede4062315ce0aaf8a657a920858f1a2f35fc412" integrity sha512-2QKyfjGdvuNfHsb7qnBBlKclbD4CfshH2KvDabiijLMGXPHJXGxtDzwIF7bQP+T0ysw8fYTtxPafgfs/c1Lrqg== dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-transform-typescript@^7.7.2": +"@babel/plugin-transform-typescript@^7.9.0": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.9.6.tgz#2248971416a506fc78278fc0c0ea3179224af1e9" integrity sha512-8OvsRdvpt3Iesf2qsAn+YdlwAJD7zJ+vhFZmDCa4b8dTp7MmHtKk5FF2mCsGxjZwuwsy/yIIay/nLmxST1ctVQ== @@ -796,7 +922,7 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-typescript" "^7.8.3" -"@babel/plugin-transform-unicode-regex@^7.7.0": +"@babel/plugin-transform-unicode-regex@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.8.3.tgz#0cef36e3ba73e5c57273effb182f46b91a1ecaad" integrity sha512-+ufgJjYdmWfSQ+6NS9VGUR2ns8cjJjYbrbi11mZBTaWm+Fui/ncTLFF28Ei1okavY+xkojGr1eJxNsWYeA5aZw== @@ -804,67 +930,76 @@ "@babel/helper-create-regexp-features-plugin" "^7.8.3" "@babel/helper-plugin-utils" "^7.8.3" -"@babel/preset-env@7.7.1": - version "7.7.1" - resolved 
"https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.7.1.tgz#04a2ff53552c5885cf1083e291c8dd5490f744bb" - integrity sha512-/93SWhi3PxcVTDpSqC+Dp4YxUu3qZ4m7I76k0w73wYfn7bGVuRIO4QUz95aJksbS+AD1/mT1Ie7rbkT0wSplaA== +"@babel/preset-env@7.9.6": + version "7.9.6" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.9.6.tgz#df063b276c6455ec6fcfc6e53aacc38da9b0aea6" + integrity sha512-0gQJ9RTzO0heXOhzftog+a/WyOuqMrAIugVYxMYf83gh1CQaQDjMtsOpqOwXyDL/5JcWsrCm8l4ju8QC97O7EQ== dependencies: - "@babel/helper-module-imports" "^7.7.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-proposal-async-generator-functions" "^7.7.0" - "@babel/plugin-proposal-dynamic-import" "^7.7.0" - "@babel/plugin-proposal-json-strings" "^7.2.0" - "@babel/plugin-proposal-object-rest-spread" "^7.6.2" - "@babel/plugin-proposal-optional-catch-binding" "^7.2.0" - "@babel/plugin-proposal-unicode-property-regex" "^7.7.0" - "@babel/plugin-syntax-async-generators" "^7.2.0" - "@babel/plugin-syntax-dynamic-import" "^7.2.0" - "@babel/plugin-syntax-json-strings" "^7.2.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" - "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" - "@babel/plugin-syntax-top-level-await" "^7.7.0" - "@babel/plugin-transform-arrow-functions" "^7.2.0" - "@babel/plugin-transform-async-to-generator" "^7.7.0" - "@babel/plugin-transform-block-scoped-functions" "^7.2.0" - "@babel/plugin-transform-block-scoping" "^7.6.3" - "@babel/plugin-transform-classes" "^7.7.0" - "@babel/plugin-transform-computed-properties" "^7.2.0" - "@babel/plugin-transform-destructuring" "^7.6.0" - "@babel/plugin-transform-dotall-regex" "^7.7.0" - "@babel/plugin-transform-duplicate-keys" "^7.5.0" - "@babel/plugin-transform-exponentiation-operator" "^7.2.0" - "@babel/plugin-transform-for-of" "^7.4.4" - "@babel/plugin-transform-function-name" "^7.7.0" - "@babel/plugin-transform-literals" "^7.2.0" - "@babel/plugin-transform-member-expression-literals" "^7.2.0" - 
"@babel/plugin-transform-modules-amd" "^7.5.0" - "@babel/plugin-transform-modules-commonjs" "^7.7.0" - "@babel/plugin-transform-modules-systemjs" "^7.7.0" - "@babel/plugin-transform-modules-umd" "^7.7.0" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.7.0" - "@babel/plugin-transform-new-target" "^7.4.4" - "@babel/plugin-transform-object-super" "^7.5.5" - "@babel/plugin-transform-parameters" "^7.4.4" - "@babel/plugin-transform-property-literals" "^7.2.0" - "@babel/plugin-transform-regenerator" "^7.7.0" - "@babel/plugin-transform-reserved-words" "^7.2.0" - "@babel/plugin-transform-shorthand-properties" "^7.2.0" - "@babel/plugin-transform-spread" "^7.6.2" - "@babel/plugin-transform-sticky-regex" "^7.2.0" - "@babel/plugin-transform-template-literals" "^7.4.4" - "@babel/plugin-transform-typeof-symbol" "^7.2.0" - "@babel/plugin-transform-unicode-regex" "^7.7.0" - "@babel/types" "^7.7.1" - browserslist "^4.6.0" - core-js-compat "^3.1.1" + "@babel/compat-data" "^7.9.6" + "@babel/helper-compilation-targets" "^7.9.6" + "@babel/helper-module-imports" "^7.8.3" + "@babel/helper-plugin-utils" "^7.8.3" + "@babel/plugin-proposal-async-generator-functions" "^7.8.3" + "@babel/plugin-proposal-dynamic-import" "^7.8.3" + "@babel/plugin-proposal-json-strings" "^7.8.3" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.8.3" + "@babel/plugin-proposal-numeric-separator" "^7.8.3" + "@babel/plugin-proposal-object-rest-spread" "^7.9.6" + "@babel/plugin-proposal-optional-catch-binding" "^7.8.3" + "@babel/plugin-proposal-optional-chaining" "^7.9.0" + "@babel/plugin-proposal-unicode-property-regex" "^7.8.3" + "@babel/plugin-syntax-async-generators" "^7.8.0" + "@babel/plugin-syntax-dynamic-import" "^7.8.0" + "@babel/plugin-syntax-json-strings" "^7.8.0" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.0" + "@babel/plugin-syntax-numeric-separator" "^7.8.0" + "@babel/plugin-syntax-object-rest-spread" "^7.8.0" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.0" + 
"@babel/plugin-syntax-optional-chaining" "^7.8.0" + "@babel/plugin-syntax-top-level-await" "^7.8.3" + "@babel/plugin-transform-arrow-functions" "^7.8.3" + "@babel/plugin-transform-async-to-generator" "^7.8.3" + "@babel/plugin-transform-block-scoped-functions" "^7.8.3" + "@babel/plugin-transform-block-scoping" "^7.8.3" + "@babel/plugin-transform-classes" "^7.9.5" + "@babel/plugin-transform-computed-properties" "^7.8.3" + "@babel/plugin-transform-destructuring" "^7.9.5" + "@babel/plugin-transform-dotall-regex" "^7.8.3" + "@babel/plugin-transform-duplicate-keys" "^7.8.3" + "@babel/plugin-transform-exponentiation-operator" "^7.8.3" + "@babel/plugin-transform-for-of" "^7.9.0" + "@babel/plugin-transform-function-name" "^7.8.3" + "@babel/plugin-transform-literals" "^7.8.3" + "@babel/plugin-transform-member-expression-literals" "^7.8.3" + "@babel/plugin-transform-modules-amd" "^7.9.6" + "@babel/plugin-transform-modules-commonjs" "^7.9.6" + "@babel/plugin-transform-modules-systemjs" "^7.9.6" + "@babel/plugin-transform-modules-umd" "^7.9.0" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.8.3" + "@babel/plugin-transform-new-target" "^7.8.3" + "@babel/plugin-transform-object-super" "^7.8.3" + "@babel/plugin-transform-parameters" "^7.9.5" + "@babel/plugin-transform-property-literals" "^7.8.3" + "@babel/plugin-transform-regenerator" "^7.8.7" + "@babel/plugin-transform-reserved-words" "^7.8.3" + "@babel/plugin-transform-shorthand-properties" "^7.8.3" + "@babel/plugin-transform-spread" "^7.8.3" + "@babel/plugin-transform-sticky-regex" "^7.8.3" + "@babel/plugin-transform-template-literals" "^7.8.3" + "@babel/plugin-transform-typeof-symbol" "^7.8.4" + "@babel/plugin-transform-unicode-regex" "^7.8.3" + "@babel/preset-modules" "^0.1.3" + "@babel/types" "^7.9.6" + browserslist "^4.11.1" + core-js-compat "^3.6.2" invariant "^2.2.2" - js-levenshtein "^1.1.3" + levenary "^1.1.1" semver "^5.5.0" -"@babel/preset-modules@0.1.1": - version "0.1.1" - resolved 
"https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.1.tgz#add61473e3182771b36930c1312f3c56c114e406" - integrity sha512-x/kt2aAZlgcFnP3P851fkkb2s4FmTiyGic58pkWMaRK9Am3u9KkH1ttHGjwlsKu7/TVJsLEBXZnjUxqsid3tww== +"@babel/preset-modules@0.1.3", "@babel/preset-modules@^0.1.3": + version "0.1.3" + resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.3.tgz#13242b53b5ef8c883c3cf7dddd55b36ce80fbc72" + integrity sha512-Ra3JXOHBq2xd56xSF7lMKXdjBn3T772Y1Wet3yWnkDly9zHvJki029tAFzvAAK5cf4YV3yoxuP61crYRol6SVg== dependencies: "@babel/helper-plugin-utils" "^7.0.0" "@babel/plugin-proposal-unicode-property-regex" "^7.4.4" @@ -872,24 +1007,25 @@ "@babel/types" "^7.4.4" esutils "^2.0.2" -"@babel/preset-react@7.7.0": - version "7.7.0" - resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.7.0.tgz#8ab0c4787d98cf1f5f22dabf115552bf9e4e406c" - integrity sha512-IXXgSUYBPHUGhUkH+89TR6faMcBtuMW0h5OHbMuVbL3/5wK2g6a2M2BBpkLa+Kw0sAHiZ9dNVgqJMDP/O4GRBA== +"@babel/preset-react@7.9.4": + version "7.9.4" + resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.9.4.tgz#c6c97693ac65b6b9c0b4f25b948a8f665463014d" + integrity sha512-AxylVB3FXeOTQXNXyiuAQJSvss62FEotbX2Pzx3K/7c+MKJMdSg6Ose6QYllkdCFA8EInCJVw7M/o5QbLuA4ZQ== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-transform-react-display-name" "^7.0.0" - "@babel/plugin-transform-react-jsx" "^7.7.0" - "@babel/plugin-transform-react-jsx-self" "^7.0.0" - "@babel/plugin-transform-react-jsx-source" "^7.0.0" + "@babel/helper-plugin-utils" "^7.8.3" + "@babel/plugin-transform-react-display-name" "^7.8.3" + "@babel/plugin-transform-react-jsx" "^7.9.4" + "@babel/plugin-transform-react-jsx-development" "^7.9.0" + "@babel/plugin-transform-react-jsx-self" "^7.9.0" + "@babel/plugin-transform-react-jsx-source" "^7.9.0" -"@babel/preset-typescript@7.7.2": - version "7.7.2" - resolved 
"https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.7.2.tgz#f71c8bba2ae02f11b29dbf7d6a35f47bbe011632" - integrity sha512-1B4HthAelaLGfNRyrWqJtBEjXX1ulThCrLQ5B2VOtEAznWFIFXFJahgXImqppy66lx/Oh+cOSCQdJzZqh2Jh5g== +"@babel/preset-typescript@7.9.0": + version "7.9.0" + resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.9.0.tgz#87705a72b1f0d59df21c179f7c3d2ef4b16ce192" + integrity sha512-S4cueFnGrIbvYJgwsVFKdvOmpiL0XGw9MFW9D0vgRys5g36PBhZRL8NX8Gr2akz8XRtzq6HuDXPD/1nniagNUg== dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-transform-typescript" "^7.7.2" + "@babel/helper-plugin-utils" "^7.8.3" + "@babel/plugin-transform-typescript" "^7.9.0" "@babel/runtime-corejs3@^7.8.3": version "7.9.6" @@ -899,21 +1035,23 @@ core-js-pure "^3.0.0" regenerator-runtime "^0.13.4" -"@babel/runtime@7.7.2": - version "7.7.2" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.7.2.tgz#111a78002a5c25fc8e3361bedc9529c696b85a6a" - integrity sha512-JONRbXbTXc9WQE2mAZd1p0Z3DZ/6vaQIkgYMSTP3KjRCyd7rCZCcfhCyX+YjwcKxcZ82UrxbRD358bpExNgrjw== - dependencies: - regenerator-runtime "^0.13.2" - -"@babel/runtime@^7.3.1", "@babel/runtime@^7.5.5", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": +"@babel/runtime@7.9.6", "@babel/runtime@^7.3.1", "@babel/runtime@^7.5.5", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.9.6.tgz#a9102eb5cadedf3f31d08a9ecf294af7827ea29f" integrity sha512-64AF1xY3OAkFHqOb9s4jpgk1Mm5vDZ4L3acHvAml+53nO1XbXLuDodsVpO4OIUsmemlUHMxNdYMNJmsvOwLrvQ== dependencies: regenerator-runtime "^0.13.4" -"@babel/template@^7.7.0", "@babel/template@^7.8.3", "@babel/template@^7.8.6": +"@babel/template@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.10.1.tgz#e167154a94cb5f14b28dc58f5356d2162f539811" + integrity 
sha512-OQDg6SqvFSsc9A0ej6SKINWrpJiNonRIniYondK2ViKhB06i3c0s+76XUft71iqBEe9S1OKsHwPAjfHnuvnCig== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/parser" "^7.10.1" + "@babel/types" "^7.10.1" + +"@babel/template@^7.7.4", "@babel/template@^7.8.3", "@babel/template@^7.8.6": version "7.8.6" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.8.6.tgz#86b22af15f828dfb086474f964dcc3e39c43ce2b" integrity sha512-zbMsPMy/v0PWFZEhQJ66bqjhH+z0JgMoBWuikXybgG3Gkd/3t5oQ1Rw2WQhnSrsOmsKXnZOx15tkC4qON/+JPg== @@ -922,7 +1060,22 @@ "@babel/parser" "^7.8.6" "@babel/types" "^7.8.6" -"@babel/traverse@^7.4.5", "@babel/traverse@^7.7.2", "@babel/traverse@^7.8.3", "@babel/traverse@^7.9.6": +"@babel/traverse@^7.10.1": + version "7.10.1" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.10.1.tgz#bbcef3031e4152a6c0b50147f4958df54ca0dd27" + integrity sha512-C/cTuXeKt85K+p08jN6vMDz8vSV0vZcI0wmQ36o6mjbuo++kPMdpOYw23W2XH04dbRt9/nMEfA4W3eR21CD+TQ== + dependencies: + "@babel/code-frame" "^7.10.1" + "@babel/generator" "^7.10.1" + "@babel/helper-function-name" "^7.10.1" + "@babel/helper-split-export-declaration" "^7.10.1" + "@babel/parser" "^7.10.1" + "@babel/types" "^7.10.1" + debug "^4.1.0" + globals "^11.1.0" + lodash "^4.17.13" + +"@babel/traverse@^7.4.5", "@babel/traverse@^7.7.4", "@babel/traverse@^7.8.3", "@babel/traverse@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.9.6.tgz#5540d7577697bf619cc57b92aa0f1c231a94f442" integrity sha512-b3rAHSjbxy6VEAvlxM8OV/0X4XrG72zoxme6q1MOoe2vd0bEc+TwayhuC1+Dfgqh1QEG+pj7atQqvUprHIccsg== @@ -946,7 +1099,7 @@ lodash "^4.17.13" to-fast-properties "^2.0.0" -"@babel/types@^7.4.4", "@babel/types@^7.7.1", "@babel/types@^7.7.2", "@babel/types@^7.8.3", "@babel/types@^7.8.6", "@babel/types@^7.9.0", "@babel/types@^7.9.5", "@babel/types@^7.9.6": +"@babel/types@7.9.6", "@babel/types@^7.4.4", "@babel/types@^7.7.4", "@babel/types@^7.8.3", "@babel/types@^7.8.6", "@babel/types@^7.9.0", 
"@babel/types@^7.9.5", "@babel/types@^7.9.6": version "7.9.6" resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.9.6.tgz#2c5502b427251e9de1bd2dff95add646d95cc9f7" integrity sha512-qxXzvBO//jO9ZnoasKF1uJzHd2+M6Q2ZPIVfnFps8JJvXy0ZBbwbNOmE6SGIY5XOY6d1Bo5lb9d9RJ8nv3WSeA== @@ -955,6 +1108,15 @@ lodash "^4.17.13" to-fast-properties "^2.0.0" +"@babel/types@^7.10.1", "@babel/types@^7.10.2": + version "7.10.2" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.10.2.tgz#30283be31cad0dbf6fb00bd40641ca0ea675172d" + integrity sha512-AD3AwWBSz0AWF0AkCN9VPiWrvldXq+/e3cHa4J89vo4ymjz1XwrBFFVZmkJTsQIPNk+ZVomPSXUJqq8yyjZsng== + dependencies: + "@babel/helper-validator-identifier" "^7.10.1" + lodash "^4.17.13" + to-fast-properties "^2.0.0" + "@emotion/cache@^10.0.27": version "10.0.29" resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-10.0.29.tgz#87e7e64f412c060102d589fe7c6dc042e6f9d1e0" @@ -1113,14 +1275,14 @@ unique-filename "^1.1.1" which "^1.3.1" -"@lerna/add@3.20.0": - version "3.20.0" - resolved "https://registry.yarnpkg.com/@lerna/add/-/add-3.20.0.tgz#bea7edf36fc93fb72ec34cb9ba854c48d4abf309" - integrity sha512-AnH1oRIEEg/VDa3SjYq4x1/UglEAvrZuV0WssHUMN81RTZgQk3we+Mv3qZNddrZ/fBcZu2IAdN/EQ3+ie2JxKQ== +"@lerna/add@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/add/-/add-3.21.0.tgz#27007bde71cc7b0a2969ab3c2f0ae41578b4577b" + integrity sha512-vhUXXF6SpufBE1EkNEXwz1VLW03f177G9uMOFMQkp6OJ30/PWg4Ekifuz9/3YfgB2/GH8Tu4Lk3O51P2Hskg/A== dependencies: "@evocateur/pacote" "^9.6.3" - "@lerna/bootstrap" "3.20.0" - "@lerna/command" "3.18.5" + "@lerna/bootstrap" "3.21.0" + "@lerna/command" "3.21.0" "@lerna/filter-options" "3.20.0" "@lerna/npm-conf" "3.16.0" "@lerna/validation-error" "3.13.0" @@ -1129,12 +1291,12 @@ p-map "^2.1.0" semver "^6.2.0" -"@lerna/bootstrap@3.20.0": - version "3.20.0" - resolved "https://registry.yarnpkg.com/@lerna/bootstrap/-/bootstrap-3.20.0.tgz#635d71046830f208e851ab429a63da1747589e37" - integrity 
sha512-Wylullx3uthKE7r4izo09qeRGL20Y5yONlQEjPCfnbxCC2Elu+QcPu4RC6kqKQ7b+g7pdC3OOgcHZjngrwr5XQ== +"@lerna/bootstrap@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/bootstrap/-/bootstrap-3.21.0.tgz#bcd1b651be5b0970b20d8fae04c864548123aed6" + integrity sha512-mtNHlXpmvJn6JTu0KcuTTPl2jLsDNud0QacV/h++qsaKbhAaJr/FElNZ5s7MwZFUM3XaDmvWzHKaszeBMHIbBw== dependencies: - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/filter-options" "3.20.0" "@lerna/has-npm-version" "3.16.5" "@lerna/npm-install" "3.16.5" @@ -1158,13 +1320,13 @@ read-package-tree "^5.1.6" semver "^6.2.0" -"@lerna/changed@3.20.0": - version "3.20.0" - resolved "https://registry.yarnpkg.com/@lerna/changed/-/changed-3.20.0.tgz#66b97ebd6c8f8d207152ee524a0791846a9097ae" - integrity sha512-+hzMFSldbRPulZ0vbKk6RD9f36gaH3Osjx34wrrZ62VB4pKmjyuS/rxVYkCA3viPLHoiIw2F8zHM5BdYoDSbjw== +"@lerna/changed@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/changed/-/changed-3.21.0.tgz#108e15f679bfe077af500f58248c634f1044ea0b" + integrity sha512-hzqoyf8MSHVjZp0gfJ7G8jaz+++mgXYiNs9iViQGA8JlN/dnWLI5sWDptEH3/B30Izo+fdVz0S0s7ydVE3pWIw== dependencies: "@lerna/collect-updates" "3.20.0" - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/listable" "3.18.5" "@lerna/output" "3.13.0" @@ -1186,12 +1348,12 @@ execa "^1.0.0" strong-log-transformer "^2.0.0" -"@lerna/clean@3.20.0": - version "3.20.0" - resolved "https://registry.yarnpkg.com/@lerna/clean/-/clean-3.20.0.tgz#ba777e373ddeae63e57860df75d47a9e5264c5b2" - integrity sha512-9ZdYrrjQvR5wNXmHfDsfjWjp0foOkCwKe3hrckTzkAeQA1ibyz5llGwz5e1AeFrV12e2/OLajVqYfe+qdkZUgg== +"@lerna/clean@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/clean/-/clean-3.21.0.tgz#c0b46b5300cc3dae2cda3bec14b803082da3856d" + integrity sha512-b/L9l+MDgE/7oGbrav6rG8RTQvRiZLO1zTcG17zgJAAuhlsPxJExMlh2DFwJEVi2les70vMhHfST3Ue1IMMjpg== dependencies: - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" 
"@lerna/filter-options" "3.20.0" "@lerna/prompt" "3.18.5" "@lerna/pulse-till-done" "3.13.0" @@ -1231,14 +1393,14 @@ npmlog "^4.1.2" slash "^2.0.0" -"@lerna/command@3.18.5": - version "3.18.5" - resolved "https://registry.yarnpkg.com/@lerna/command/-/command-3.18.5.tgz#14c6d2454adbfd365f8027201523e6c289cd3cd9" - integrity sha512-36EnqR59yaTU4HrR1C9XDFti2jRx0BgpIUBeWn129LZZB8kAB3ov1/dJNa1KcNRKp91DncoKHLY99FZ6zTNpMQ== +"@lerna/command@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/command/-/command-3.21.0.tgz#9a2383759dc7b700dacfa8a22b2f3a6e190121f7" + integrity sha512-T2bu6R8R3KkH5YoCKdutKv123iUgUbW8efVjdGCDnCMthAQzoentOJfDeodBwn0P2OqCl3ohsiNVtSn9h78fyQ== dependencies: "@lerna/child-process" "3.16.5" "@lerna/package-graph" "3.18.5" - "@lerna/project" "3.18.0" + "@lerna/project" "3.21.0" "@lerna/validation-error" "3.13.0" "@lerna/write-log-file" "3.13.0" clone-deep "^4.0.1" @@ -1247,10 +1409,10 @@ is-ci "^2.0.0" npmlog "^4.1.2" -"@lerna/conventional-commits@3.18.5": - version "3.18.5" - resolved "https://registry.yarnpkg.com/@lerna/conventional-commits/-/conventional-commits-3.18.5.tgz#08efd2e5b45acfaf3f151a53a3ec7ecade58a7bc" - integrity sha512-qcvXIEJ3qSgalxXnQ7Yxp5H9Ta5TVyai6vEor6AAEHc20WiO7UIdbLDCxBtiiHMdGdpH85dTYlsoYUwsCJu3HQ== +"@lerna/conventional-commits@3.22.0": + version "3.22.0" + resolved "https://registry.yarnpkg.com/@lerna/conventional-commits/-/conventional-commits-3.22.0.tgz#2798f4881ee2ef457bdae027ab7d0bf0af6f1e09" + integrity sha512-z4ZZk1e8Mhz7+IS8NxHr64wyklHctCJyWpJKEZZPJiLFJ8yKto/x38O80R10pIzC0rr8Sy/OsjSH4bl0TbbgqA== dependencies: "@lerna/validation-error" "3.13.0" conventional-changelog-angular "^5.0.3" @@ -1273,14 +1435,14 @@ fs-extra "^8.1.0" npmlog "^4.1.2" -"@lerna/create@3.18.5": - version "3.18.5" - resolved "https://registry.yarnpkg.com/@lerna/create/-/create-3.18.5.tgz#11ac539f069248eaf7bc4c42e237784330f4fc47" - integrity 
sha512-cHpjocbpKmLopCuZFI7cKEM3E/QY8y+yC7VtZ4FQRSaLU8D8i2xXtXmYaP1GOlVNavji0iwoXjuNpnRMInIr2g== +"@lerna/create@3.22.0": + version "3.22.0" + resolved "https://registry.yarnpkg.com/@lerna/create/-/create-3.22.0.tgz#d6bbd037c3dc5b425fe5f6d1b817057c278f7619" + integrity sha512-MdiQQzCcB4E9fBF1TyMOaAEz9lUjIHp1Ju9H7f3lXze5JK6Fl5NYkouAvsLgY6YSIhXMY8AHW2zzXeBDY4yWkw== dependencies: "@evocateur/pacote" "^9.6.3" "@lerna/child-process" "3.16.5" - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/npm-conf" "3.16.0" "@lerna/validation-error" "3.13.0" camelcase "^5.0.0" @@ -1305,23 +1467,23 @@ "@lerna/child-process" "3.16.5" npmlog "^4.1.2" -"@lerna/diff@3.18.5": - version "3.18.5" - resolved "https://registry.yarnpkg.com/@lerna/diff/-/diff-3.18.5.tgz#e9e2cb882f84d5b84f0487c612137305f07accbc" - integrity sha512-u90lGs+B8DRA9Z/2xX4YaS3h9X6GbypmGV6ITzx9+1Ga12UWGTVlKaCXBgONMBjzJDzAQOK8qPTwLA57SeBLgA== +"@lerna/diff@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/diff/-/diff-3.21.0.tgz#e6df0d8b9916167ff5a49fcb02ac06424280a68d" + integrity sha512-5viTR33QV3S7O+bjruo1SaR40m7F2aUHJaDAC7fL9Ca6xji+aw1KFkpCtVlISS0G8vikUREGMJh+c/VMSc8Usw== dependencies: "@lerna/child-process" "3.16.5" - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/validation-error" "3.13.0" npmlog "^4.1.2" -"@lerna/exec@3.20.0": - version "3.20.0" - resolved "https://registry.yarnpkg.com/@lerna/exec/-/exec-3.20.0.tgz#29f0c01aee2340eb46f90706731fef2062a49639" - integrity sha512-pS1mmC7kzV668rHLWuv31ClngqeXjeHC8kJuM+W2D6IpUVMGQHLcCTYLudFgQsuKGVpl0DGNYG+sjLhAPiiu6A== +"@lerna/exec@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/exec/-/exec-3.21.0.tgz#17f07533893cb918a17b41bcc566dc437016db26" + integrity sha512-iLvDBrIE6rpdd4GIKTY9mkXyhwsJ2RvQdB9ZU+/NhR3okXfqKc6py/24tV111jqpXTtZUW6HNydT4dMao2hi1Q== dependencies: "@lerna/child-process" "3.16.5" - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/filter-options" "3.20.0" 
"@lerna/profiler" "3.20.0" "@lerna/run-topologically" "3.18.5" @@ -1364,13 +1526,13 @@ ssri "^6.0.1" tar "^4.4.8" -"@lerna/github-client@3.16.5": - version "3.16.5" - resolved "https://registry.yarnpkg.com/@lerna/github-client/-/github-client-3.16.5.tgz#2eb0235c3bf7a7e5d92d73e09b3761ab21f35c2e" - integrity sha512-rHQdn8Dv/CJrO3VouOP66zAcJzrHsm+wFuZ4uGAai2At2NkgKH+tpNhQy2H1PSC0Ezj9LxvdaHYrUzULqVK5Hw== +"@lerna/github-client@3.22.0": + version "3.22.0" + resolved "https://registry.yarnpkg.com/@lerna/github-client/-/github-client-3.22.0.tgz#5d816aa4f76747ed736ae64ff962b8f15c354d95" + integrity sha512-O/GwPW+Gzr3Eb5bk+nTzTJ3uv+jh5jGho9BOqKlajXaOkMYGBELEAqV5+uARNGWZFvYAiF4PgqHb6aCUu7XdXg== dependencies: "@lerna/child-process" "3.16.5" - "@octokit/plugin-enterprise-rest" "^3.6.1" + "@octokit/plugin-enterprise-rest" "^6.0.1" "@octokit/rest" "^16.28.4" git-url-parse "^11.1.2" npmlog "^4.1.2" @@ -1397,13 +1559,13 @@ "@lerna/child-process" "3.16.5" semver "^6.2.0" -"@lerna/import@3.18.5": - version "3.18.5" - resolved "https://registry.yarnpkg.com/@lerna/import/-/import-3.18.5.tgz#a9c7d8601870729851293c10abd18b3707f7ba5e" - integrity sha512-PH0WVLEgp+ORyNKbGGwUcrueW89K3Iuk/DDCz8mFyG2IG09l/jOF0vzckEyGyz6PO5CMcz4TI1al/qnp3FrahQ== +"@lerna/import@3.22.0": + version "3.22.0" + resolved "https://registry.yarnpkg.com/@lerna/import/-/import-3.22.0.tgz#1a5f0394f38e23c4f642a123e5e1517e70d068d2" + integrity sha512-uWOlexasM5XR6tXi4YehODtH9Y3OZrFht3mGUFFT3OIl2s+V85xIGFfqFGMTipMPAGb2oF1UBLL48kR43hRsOg== dependencies: "@lerna/child-process" "3.16.5" - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/prompt" "3.18.5" "@lerna/pulse-till-done" "3.13.0" "@lerna/validation-error" "3.13.0" @@ -1411,43 +1573,43 @@ fs-extra "^8.1.0" p-map-series "^1.0.0" -"@lerna/info@3.20.0": - version "3.20.0" - resolved "https://registry.yarnpkg.com/@lerna/info/-/info-3.20.0.tgz#3a5212f3029f2bc6255f9533bdf4bcb120ef329a" - integrity 
sha512-Rsz+KQF9mczbGUbPTrtOed1N0C+cA08Qz0eX/oI+NNjvsryZIju/o7uedG4I3P55MBiAioNrJI88fHH3eTgYug== +"@lerna/info@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/info/-/info-3.21.0.tgz#76696b676fdb0f35d48c83c63c1e32bb5e37814f" + integrity sha512-0XDqGYVBgWxUquFaIptW2bYSIu6jOs1BtkvRTWDDhw4zyEdp6q4eaMvqdSap1CG+7wM5jeLCi6z94wS0AuiuwA== dependencies: - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/output" "3.13.0" envinfo "^7.3.1" -"@lerna/init@3.18.5": - version "3.18.5" - resolved "https://registry.yarnpkg.com/@lerna/init/-/init-3.18.5.tgz#86dd0b2b3290755a96975069b5cb007f775df9f5" - integrity sha512-oCwipWrha98EcJAHm8AGd2YFFLNI7AW9AWi0/LbClj1+XY9ah+uifXIgYGfTk63LbgophDd8936ZEpHMxBsbAg== +"@lerna/init@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/init/-/init-3.21.0.tgz#1e810934dc8bf4e5386c031041881d3b4096aa5c" + integrity sha512-6CM0z+EFUkFfurwdJCR+LQQF6MqHbYDCBPyhu/d086LRf58GtYZYj49J8mKG9ktayp/TOIxL/pKKjgLD8QBPOg== dependencies: "@lerna/child-process" "3.16.5" - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" fs-extra "^8.1.0" p-map "^2.1.0" write-json-file "^3.2.0" -"@lerna/link@3.18.5": - version "3.18.5" - resolved "https://registry.yarnpkg.com/@lerna/link/-/link-3.18.5.tgz#f24347e4f0b71d54575bd37cfa1794bc8ee91b18" - integrity sha512-xTN3vktJpkT7Nqc3QkZRtHO4bT5NvuLMtKNIBDkks0HpGxC9PRyyqwOoCoh1yOGbrWIuDezhfMg3Qow+6I69IQ== +"@lerna/link@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/link/-/link-3.21.0.tgz#8be68ff0ccee104b174b5bbd606302c2f06e9d9b" + integrity sha512-tGu9GxrX7Ivs+Wl3w1+jrLi1nQ36kNI32dcOssij6bg0oZ2M2MDEFI9UF2gmoypTaN9uO5TSsjCFS7aR79HbdQ== dependencies: - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/package-graph" "3.18.5" "@lerna/symlink-dependencies" "3.17.0" p-map "^2.1.0" slash "^2.0.0" -"@lerna/list@3.20.0": - version "3.20.0" - resolved 
"https://registry.yarnpkg.com/@lerna/list/-/list-3.20.0.tgz#7e67cc29c5cf661cfd097e8a7c2d3dcce7a81029" - integrity sha512-fXTicPrfioVnRzknyPawmYIVkzDRBaQqk9spejS1S3O1DOidkihK0xxNkr8HCVC0L22w6f92g83qWDp2BYRUbg== +"@lerna/list@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/list/-/list-3.21.0.tgz#42f76fafa56dea13b691ec8cab13832691d61da2" + integrity sha512-KehRjE83B1VaAbRRkRy6jLX1Cin8ltsrQ7FHf2bhwhRHK0S54YuA6LOoBnY/NtA8bHDX/Z+G5sMY78X30NS9tg== dependencies: - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/filter-options" "3.20.0" "@lerna/listable" "3.18.5" "@lerna/output" "3.13.0" @@ -1593,10 +1755,10 @@ npmlog "^4.1.2" upath "^1.2.0" -"@lerna/project@3.18.0": - version "3.18.0" - resolved "https://registry.yarnpkg.com/@lerna/project/-/project-3.18.0.tgz#56feee01daeb42c03cbdf0ed8a2a10cbce32f670" - integrity sha512-+LDwvdAp0BurOAWmeHE3uuticsq9hNxBI0+FMHiIai8jrygpJGahaQrBYWpwbshbQyVLeQgx3+YJdW2TbEdFWA== +"@lerna/project@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/project/-/project-3.21.0.tgz#5d784d2d10c561a00f20320bcdb040997c10502d" + integrity sha512-xT1mrpET2BF11CY32uypV2GPtPVm6Hgtha7D81GQP9iAitk9EccrdNjYGt5UBYASl4CIDXBRxwmTTVGfrCx82A== dependencies: "@lerna/package" "3.16.0" "@lerna/validation-error" "3.13.0" @@ -1619,10 +1781,10 @@ inquirer "^6.2.0" npmlog "^4.1.2" -"@lerna/publish@3.20.2": - version "3.20.2" - resolved "https://registry.yarnpkg.com/@lerna/publish/-/publish-3.20.2.tgz#a45d29813099b3249657ea913d0dc3f8ebc5cc2e" - integrity sha512-N7Y6PdhJ+tYQPdI1tZum8W25cDlTp4D6brvRacKZusweWexxaopbV8RprBaKexkEX/KIbncuADq7qjDBdQHzaA== +"@lerna/publish@3.22.0": + version "3.22.0" + resolved "https://registry.yarnpkg.com/@lerna/publish/-/publish-3.22.0.tgz#7a3fb61026d3b7425f3b9a1849421f67d795c55d" + integrity sha512-8LBeTLBN8NIrCrLGykRu+PKrfrCC16sGCVY0/bzq9TDioR7g6+cY0ZAw653Qt/0Kr7rg3J7XxVNdzj3fvevlwA== dependencies: "@evocateur/libnpmaccess" "^3.1.2" "@evocateur/npm-registry-fetch" "^4.0.0" @@ 
-1630,7 +1792,7 @@ "@lerna/check-working-tree" "3.16.5" "@lerna/child-process" "3.16.5" "@lerna/collect-updates" "3.20.0" - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/describe-ref" "3.16.5" "@lerna/log-packed" "3.16.0" "@lerna/npm-conf" "3.16.0" @@ -1645,7 +1807,7 @@ "@lerna/run-lifecycle" "3.16.2" "@lerna/run-topologically" "3.18.5" "@lerna/validation-error" "3.13.0" - "@lerna/version" "3.20.2" + "@lerna/version" "3.22.0" figgy-pudding "^3.5.1" fs-extra "^8.1.0" npm-package-arg "^6.1.0" @@ -1708,12 +1870,12 @@ figgy-pudding "^3.5.1" p-queue "^4.0.0" -"@lerna/run@3.20.0": - version "3.20.0" - resolved "https://registry.yarnpkg.com/@lerna/run/-/run-3.20.0.tgz#a479f7c42bdf9ebabb3a1e5a2bdebb7a8d201151" - integrity sha512-9U3AqeaCeB7KsGS9oyKNp62s9vYoULg/B4cqXTKZkc+OKL6QOEjYHYVSBcMK9lUXrMjCjDIuDSX3PnTCPxQ2Dw== +"@lerna/run@3.21.0": + version "3.21.0" + resolved "https://registry.yarnpkg.com/@lerna/run/-/run-3.21.0.tgz#2a35ec84979e4d6e42474fe148d32e5de1cac891" + integrity sha512-fJF68rT3veh+hkToFsBmUJ9MHc9yGXA7LSDvhziAojzOb0AI/jBDp6cEcDQyJ7dbnplba2Lj02IH61QUf9oW0Q== dependencies: - "@lerna/command" "3.18.5" + "@lerna/command" "3.21.0" "@lerna/filter-options" "3.20.0" "@lerna/npm-run-script" "3.16.5" "@lerna/output" "3.13.0" @@ -1758,17 +1920,17 @@ dependencies: npmlog "^4.1.2" -"@lerna/version@3.20.2": - version "3.20.2" - resolved "https://registry.yarnpkg.com/@lerna/version/-/version-3.20.2.tgz#3709141c0f537741d9bc10cb24f56897bcb30428" - integrity sha512-ckBJMaBWc+xJen0cMyCE7W67QXLLrc0ELvigPIn8p609qkfNM0L0CF803MKxjVOldJAjw84b8ucNWZLvJagP/Q== +"@lerna/version@3.22.0": + version "3.22.0" + resolved "https://registry.yarnpkg.com/@lerna/version/-/version-3.22.0.tgz#67e1340c1904e9b339becd66429f32dd8ad65a55" + integrity sha512-6uhL6RL7/FeW6u1INEgyKjd5dwO8+IsbLfkfC682QuoVLS7VG6OOB+JmTpCvnuyYWI6fqGh1bRk9ww8kPsj+EA== dependencies: "@lerna/check-working-tree" "3.16.5" "@lerna/child-process" "3.16.5" "@lerna/collect-updates" "3.20.0" - "@lerna/command" "3.18.5" 
- "@lerna/conventional-commits" "3.18.5" - "@lerna/github-client" "3.16.5" + "@lerna/command" "3.21.0" + "@lerna/conventional-commits" "3.22.0" + "@lerna/github-client" "3.22.0" "@lerna/gitlab-client" "3.15.0" "@lerna/output" "3.13.0" "@lerna/prerelease-id-from-version" "3.16.0" @@ -1806,10 +1968,26 @@ call-me-maybe "^1.0.1" glob-to-regexp "^0.3.0" -"@next/react-refresh-utils@9.3.6": - version "9.3.6" - resolved "https://registry.yarnpkg.com/@next/react-refresh-utils/-/react-refresh-utils-9.3.6.tgz#a5eb91b4b8270fecb5fcf2d1319d8d04f0cf6f75" - integrity sha512-XgxPc3WAkNpzD9xYtN4bd3wpV39WhnHYSwXGovVVcImXY6yn1as6hDgwkbuLADY/SHLIu2AI6CTVq0JDoTC/+g== +"@next/react-dev-overlay@9.4.4": + version "9.4.4" + resolved "https://registry.yarnpkg.com/@next/react-dev-overlay/-/react-dev-overlay-9.4.4.tgz#4ae03ac839ff022b3ce5c695bd24b179d4ef459d" + integrity sha512-UUAa8RbH7BeWDPCkagIkR4sUsyvTPlEdFrPZ9kGjf2+p8HkLHpcVY7y+XRnNvJQs4PsAF0Plh20FBz7t54U2iQ== + dependencies: + "@babel/code-frame" "7.8.3" + ally.js "1.4.1" + anser "1.4.9" + chalk "4.0.0" + classnames "2.2.6" + data-uri-to-buffer "3.0.0" + shell-quote "1.7.2" + source-map "0.8.0-beta.0" + stacktrace-parser "0.1.10" + strip-ansi "6.0.0" + +"@next/react-refresh-utils@9.4.4": + version "9.4.4" + resolved "https://registry.yarnpkg.com/@next/react-refresh-utils/-/react-refresh-utils-9.4.4.tgz#d94cbb3b354a07f1f5b80e554d6b9e34aba99e41" + integrity sha512-9nKENeWRI6kQk44TbeqleIVtNLfcS3klVUepzl/ZCqzR5Bi06uqBCD277hdVvG/wL1pxA+R/pgJQLqnF5E2wPQ== "@nodelib/fs.stat@^1.1.2": version "1.1.3" @@ -1817,11 +1995,11 @@ integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw== "@octokit/auth-token@^2.4.0": - version "2.4.0" - resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-2.4.0.tgz#b64178975218b99e4dfe948253f0673cbbb59d9f" - integrity sha512-eoOVMjILna7FVQf96iWc3+ZtE/ZT6y8ob8ZzcqKY1ibSQCnu4O/B7pJvzMx5cyZ/RjAff6DAdEb0O0Cjcxidkg== + version "2.4.1" + resolved 
"https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-2.4.1.tgz#375d79eebd03750e6a9b0299e80b8167c7c85655" + integrity sha512-NB81O5h39KfHYGtgfWr2booRxp2bWOJoqbWwbyUg2hw6h35ArWYlAST5B3XwAkbdcx13yt84hFXyFP5X0QToWA== dependencies: - "@octokit/types" "^2.0.0" + "@octokit/types" "^4.0.1" "@octokit/endpoint@^6.0.1": version "6.0.1" @@ -1832,10 +2010,10 @@ is-plain-object "^3.0.0" universal-user-agent "^5.0.0" -"@octokit/plugin-enterprise-rest@^3.6.1": - version "3.6.2" - resolved "https://registry.yarnpkg.com/@octokit/plugin-enterprise-rest/-/plugin-enterprise-rest-3.6.2.tgz#74de25bef21e0182b4fa03a8678cd00a4e67e561" - integrity sha512-3wF5eueS5OHQYuAEudkpN+xVeUsg8vYEMMenEzLphUZ7PRZ8OJtDcsreL3ad9zxXmBbaFWzLmFcdob5CLyZftA== +"@octokit/plugin-enterprise-rest@^6.0.1": + version "6.0.1" + resolved "https://registry.yarnpkg.com/@octokit/plugin-enterprise-rest/-/plugin-enterprise-rest-6.0.1.tgz#e07896739618dab8da7d4077c658003775f95437" + integrity sha512-93uGjlhUD+iNg1iWhUENAtJata6w5nE+V4urXOAlIXdco6xNZtUSfYY8dzp3Udy74aqO/B5UZL80x/YMa5PKRw== "@octokit/plugin-paginate-rest@^1.1.1": version "1.1.2" @@ -1867,18 +2045,18 @@ once "^1.4.0" "@octokit/request-error@^2.0.0": - version "2.0.0" - resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-2.0.0.tgz#94ca7293373654400fbb2995f377f9473e00834b" - integrity sha512-rtYicB4Absc60rUv74Rjpzek84UbVHGHJRu4fNVlZ1mCcyUPPuzFfG9Rn6sjHrd95DEsmjSt1Axlc699ZlbDkw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-2.0.1.tgz#49bd71e811daffd5bdd06ef514ca47b5039682d1" + integrity sha512-5lqBDJ9/TOehK82VvomQ6zFiZjPeSom8fLkFVLuYL3sKiIb5RB8iN/lenLkY7oBmyQcGP7FBMGiIZTO8jufaRQ== dependencies: - "@octokit/types" "^2.0.0" + "@octokit/types" "^4.0.1" deprecation "^2.0.0" once "^1.4.0" "@octokit/request@^5.2.0": - version "5.4.2" - resolved "https://registry.yarnpkg.com/@octokit/request/-/request-5.4.2.tgz#74f8e5bbd39dc738a1b127629791f8ad1b3193ee" - integrity 
sha512-zKdnGuQ2TQ2vFk9VU8awFT4+EYf92Z/v3OlzRaSh4RIP0H6cvW1BFPXq4XYvNez+TPQjqN+0uSkCYnMFFhcFrw== + version "5.4.3" + resolved "https://registry.yarnpkg.com/@octokit/request/-/request-5.4.3.tgz#85b78ea4ae6e1c4ac2b02528102d4cd776145935" + integrity sha512-RtqMzF3mhqxmWoqVD84x2gdtbqn2inTBU/HPkWf5u0R5r7fBTaLPAcCBgukeI2gjTwD9ChL9Cu0MlTBs7B/tSw== dependencies: "@octokit/endpoint" "^6.0.1" "@octokit/request-error" "^2.0.0" @@ -1918,6 +2096,13 @@ dependencies: "@types/node" ">= 8" +"@octokit/types@^4.0.1": + version "4.0.1" + resolved "https://registry.yarnpkg.com/@octokit/types/-/types-4.0.1.tgz#dd32ff2407699f3a0c909cdd24de17b45b7d7051" + integrity sha512-Ho6h7w2h9y8RRE8r656hIj1oiSbwbIHJGF5r9G5FOwS2VdDPq8QLGvsG4x6pKHpvyGK7j+43sAc2cJKMiFoIJw== + dependencies: + "@types/node" ">= 8" + "@opencensus/core@0.0.9": version "0.0.9" resolved "https://registry.yarnpkg.com/@opencensus/core/-/core-0.0.9.tgz#b16f775435ee309433e4126af194d37313fc93b3" @@ -2055,249 +2240,10 @@ dependencies: "@types/node" "*" -"@types/d3-array@*": - version "2.0.0" - resolved "https://registry.yarnpkg.com/@types/d3-array/-/d3-array-2.0.0.tgz#a0d63a296a2d8435a9ec59393dcac746c6174a96" - integrity sha512-rGqfPVowNDTszSFvwoZIXvrPG7s/qKzm9piCRIH6xwTTRu7pPZ3ootULFnPkTt74B6i5lN0FpLQL24qGOw1uZA== - -"@types/d3-array@^1": - version "1.2.7" - resolved "https://registry.yarnpkg.com/@types/d3-array/-/d3-array-1.2.7.tgz#34dc654d34fc058c41c31dbca1ed68071a8fcc17" - integrity sha512-51vHWuUyDOi+8XuwPrTw3cFqyh2Slg9y8COYkRfjCPG9TfYqY0hoNPzv/8BrcAy0FeQBzqEo/D/8Nk2caOQJnA== - -"@types/d3-axis@*": - version "1.0.12" - resolved "https://registry.yarnpkg.com/@types/d3-axis/-/d3-axis-1.0.12.tgz#8c124edfcc02f3b3a9cdaa2a28b8a20341401799" - integrity sha512-BZISgSD5M8TgURyNtcPAmUB9sk490CO1Thb6/gIn0WZTt3Y50IssX+2Z0vTccoqZksUDTep0b+o4ofXslvNbqg== - dependencies: - "@types/d3-selection" "*" - -"@types/d3-brush@*": - version "1.1.0" - resolved 
"https://registry.yarnpkg.com/@types/d3-brush/-/d3-brush-1.1.0.tgz#3f1f01aa3d4d70aff2a358c49dd3295be10d774c" - integrity sha512-yz5Y94XpUARimOlLk+RWM1cZh1FrtmSGOyDQfCArsMa6kAnhjF3EserSTDnHAuVuNATMoTIOPHa7pjG2iTkPYA== - dependencies: - "@types/d3-selection" "*" - -"@types/d3-chord@*": - version "1.0.9" - resolved "https://registry.yarnpkg.com/@types/d3-chord/-/d3-chord-1.0.9.tgz#ccc5de03ff079025491b7aa6b750670a140b45ae" - integrity sha512-UA6lI9CVW5cT5Ku/RV4hxoFn4mKySHm7HEgodtfRthAj1lt9rKZEPon58vyYfk+HIAm33DtJJgZwMXy2QgyPXw== - -"@types/d3-collection@*": - version "1.0.8" - resolved "https://registry.yarnpkg.com/@types/d3-collection/-/d3-collection-1.0.8.tgz#aa9552c570a96e33c132e0fd20e331f64baa9dd5" - integrity sha512-y5lGlazdc0HNO0F3UUX2DPE7OmYvd9Kcym4hXwrJcNUkDaypR5pX+apuMikl9LfTxKItJsY9KYvzBulpCKyvuQ== - -"@types/d3-color@*": - version "1.2.2" - resolved "https://registry.yarnpkg.com/@types/d3-color/-/d3-color-1.2.2.tgz#80cf7cfff7401587b8f89307ba36fe4a576bc7cf" - integrity sha512-6pBxzJ8ZP3dYEQ4YjQ+NVbQaOflfgXq/JbDiS99oLobM2o72uAST4q6yPxHv6FOTCRC/n35ktuo8pvw/S4M7sw== - -"@types/d3-contour@*": - version "1.3.0" - resolved "https://registry.yarnpkg.com/@types/d3-contour/-/d3-contour-1.3.0.tgz#1a408b121fa5e341f715e3055303ef3079fc7eb0" - integrity sha512-AUCUIjEnC5lCGBM9hS+MryRaFLIrPls4Rbv6ktqbd+TK/RXZPwOy9rtBWmGpbeXcSOYCJTUDwNJuEnmYPJRxHQ== - dependencies: - "@types/d3-array" "*" - "@types/geojson" "*" - -"@types/d3-dispatch@*": - version "1.0.7" - resolved "https://registry.yarnpkg.com/@types/d3-dispatch/-/d3-dispatch-1.0.7.tgz#6721aefbb9862ce78c20a87a1490c21f57c3ed7f" - integrity sha512-M+z84G7UKwK6hEPnGCSccOg8zJ3Nk2hgDQ9sCstHXgsFU0sMxlIZVKqKB5oxUDbALqQG6ucg0G9e8cmOSlishg== - -"@types/d3-drag@*": - version "1.2.3" - resolved "https://registry.yarnpkg.com/@types/d3-drag/-/d3-drag-1.2.3.tgz#d8ddccca28e939e9c689bea6f40a937e48c39051" - integrity sha512-rWB5SPvkYVxW3sqUxHOJUZwifD0KqvKwvt1bhNqcLpW6Azsd0BJgRNcyVW8GAferaAk5r8dzeZnf9zKlg9+xMQ== - dependencies: - 
"@types/d3-selection" "*" - -"@types/d3-dsv@*": - version "1.0.36" - resolved "https://registry.yarnpkg.com/@types/d3-dsv/-/d3-dsv-1.0.36.tgz#e91129d7c02b1b814838d001e921e8b9a67153d0" - integrity sha512-jbIWQ27QJcBNMZbQv0NSQMHnBDCmxghAxePxgyiPH1XPCRkOsTBei7jcdi3fDrUCGpCV3lKrSZFSlOkhUQVClA== - -"@types/d3-ease@*": - version "1.0.9" - resolved "https://registry.yarnpkg.com/@types/d3-ease/-/d3-ease-1.0.9.tgz#1dd849bd7edef6426e915e220ed9970db5ea4e04" - integrity sha512-U5ADevQ+W6fy32FVZZC9EXallcV/Mi12A5Tkd0My5MrC7T8soMQEhlDAg88XUWm0zoCQlB4XV0en/24LvuDB4Q== - -"@types/d3-fetch@*": - version "1.1.5" - resolved "https://registry.yarnpkg.com/@types/d3-fetch/-/d3-fetch-1.1.5.tgz#51601f79dd4653b5d84e6a3176d78145e065db5e" - integrity sha512-o9c0ItT5/Gl3wbNuVpzRnYX1t3RghzeWAjHUVLuyZJudiTxC4f/fC0ZPFWLQ2lVY8pAMmxpV8TJ6ETYCgPeI3A== - dependencies: - "@types/d3-dsv" "*" - -"@types/d3-force@*": - version "1.2.1" - resolved "https://registry.yarnpkg.com/@types/d3-force/-/d3-force-1.2.1.tgz#c28803ea36fe29788db69efa0ad6c2dc09544e83" - integrity sha512-jqK+I36uz4kTBjyk39meed5y31Ab+tXYN/x1dn3nZEus9yOHCLc+VrcIYLc/aSQ0Y7tMPRlIhLetulME76EiiA== - -"@types/d3-format@*": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@types/d3-format/-/d3-format-1.3.1.tgz#35bf88264bd6bcda39251165bb827f67879c4384" - integrity sha512-KAWvReOKMDreaAwOjdfQMm0HjcUMlQG47GwqdVKgmm20vTd2pucj0a70c3gUSHrnsmo6H2AMrkBsZU2UhJLq8A== - -"@types/d3-geo@*": - version "1.11.1" - resolved "https://registry.yarnpkg.com/@types/d3-geo/-/d3-geo-1.11.1.tgz#e96ec91f16221d87507fec66b2cc889f52d2493e" - integrity sha512-Ox8WWOG3igDRoep/dNsGbOiSJYdUG3ew/6z0ETvHyAtXZVBjOE0S96zSSmzgl0gqQ3RdZjn2eeJOj9oRcMZPkQ== - dependencies: - "@types/geojson" "*" - -"@types/d3-hierarchy@*": - version "1.1.6" - resolved "https://registry.yarnpkg.com/@types/d3-hierarchy/-/d3-hierarchy-1.1.6.tgz#4c017521900813ea524c9ecb8d7985ec26a9ad9a" - integrity sha512-vvSaIDf/Ov0o3KwMT+1M8+WbnnlRiGjlGD5uvk83a1mPCTd/E5x12bUJ/oP55+wUY/4Kb5kc67rVpVGJ2KUHxg== - 
-"@types/d3-interpolate@*": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@types/d3-interpolate/-/d3-interpolate-1.3.1.tgz#1c280511f622de9b0b47d463fa55f9a4fd6f5fc8" - integrity sha512-z8Zmi08XVwe8e62vP6wcA+CNuRhpuUU5XPEfqpG0hRypDE5BWNthQHB1UNWWDB7ojCbGaN4qBdsWp5kWxhT1IQ== - dependencies: - "@types/d3-color" "*" - -"@types/d3-path@*": - version "1.0.8" - resolved "https://registry.yarnpkg.com/@types/d3-path/-/d3-path-1.0.8.tgz#48e6945a8ff43ee0a1ce85c8cfa2337de85c7c79" - integrity sha512-AZGHWslq/oApTAHu9+yH/Bnk63y9oFOMROtqPAtxl5uB6qm1x2lueWdVEjsjjV3Qc2+QfuzKIwIR5MvVBakfzA== - -"@types/d3-polygon@*": - version "1.0.7" - resolved "https://registry.yarnpkg.com/@types/d3-polygon/-/d3-polygon-1.0.7.tgz#7b3947aa2d48287ff535230d3d396668ab17bfdf" - integrity sha512-Xuw0eSjQQKs8jTiNbntWH0S+Xp+JyhqxmQ0YAQ3rDu6c3kKMFfgsaGN7Jv5u3zG6yVX/AsLP/Xs/QRjmi9g43Q== - -"@types/d3-quadtree@*": - version "1.0.7" - resolved "https://registry.yarnpkg.com/@types/d3-quadtree/-/d3-quadtree-1.0.7.tgz#8e29464ff5b326f6612c1428d9362b4b35de2b70" - integrity sha512-0ajFawWicfjsaCLh6NzxOyVDYhQAmMFbsiI3MPGLInorauHFEh9/Cl6UHNf+kt/J1jfoxKY/ZJaKAoDpbvde5Q== - -"@types/d3-random@*": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@types/d3-random/-/d3-random-1.1.2.tgz#6f77e8b7bb64ac393f92d33fe8f71038bc4f3cde" - integrity sha512-Jui+Zn28pQw/3EayPKaN4c/PqTvqNbIPjHkgIIFnxne1FdwNjfHtAIsZIBMKlquQNrrMjFzCrlF2gPs3xckqaA== - -"@types/d3-scale-chromatic@*": - version "1.5.0" - resolved "https://registry.yarnpkg.com/@types/d3-scale-chromatic/-/d3-scale-chromatic-1.5.0.tgz#315367557d51b823bec848614fac095325613fc3" - integrity sha512-9/D7cOBKdZdTCPc6re0HeSUFBM0aFzdNdmYggUWT9SRRiYSOa6Ys2xdTwHKgc1WS3gGfwTMatBOdWCS863REsg== - -"@types/d3-scale@*": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@types/d3-scale/-/d3-scale-2.2.0.tgz#e5987a2857365823eb26ed5eb21bc566c4dcf1c0" - integrity sha512-oQFanN0/PiR2oySHfj+zAAkK1/p4LD32Nt1TMVmzk+bYHk7vgIg/iTXQWitp1cIkDw4LMdcgvO63wL+mNs47YA== - 
dependencies: - "@types/d3-time" "*" - -"@types/d3-selection@*": - version "1.4.1" - resolved "https://registry.yarnpkg.com/@types/d3-selection/-/d3-selection-1.4.1.tgz#fa1f8710a6b5d7cfe5c6caa61d161be7cae4a022" - integrity sha512-bv8IfFYo/xG6dxri9OwDnK3yCagYPeRIjTlrcdYJSx+FDWlCeBDepIHUpqROmhPtZ53jyna0aUajZRk0I3rXNA== - -"@types/d3-shape@*": - version "1.3.2" - resolved "https://registry.yarnpkg.com/@types/d3-shape/-/d3-shape-1.3.2.tgz#a41d9d6b10d02e221696b240caf0b5d0f5a588ec" - integrity sha512-LtD8EaNYCaBRzHzaAiIPrfcL3DdIysc81dkGlQvv7WQP3+YXV7b0JJTtR1U3bzeRieS603KF4wUo+ZkJVenh8w== - dependencies: - "@types/d3-path" "*" - -"@types/d3-time-format@*": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@types/d3-time-format/-/d3-time-format-2.1.1.tgz#dd2c79ec4575f1355484ab6b10407824668eba42" - integrity sha512-tJSyXta8ZyJ52wDDHA96JEsvkbL6jl7wowGmuf45+fAkj5Y+SQOnz0N7/H68OWmPshPsAaWMQh+GAws44IzH3g== - -"@types/d3-time@*": - version "1.0.10" - resolved "https://registry.yarnpkg.com/@types/d3-time/-/d3-time-1.0.10.tgz#d338c7feac93a98a32aac875d1100f92c7b61f4f" - integrity sha512-aKf62rRQafDQmSiv1NylKhIMmznsjRN+MnXRXTqHoqm0U/UZzVpdrtRnSIfdiLS616OuC1soYeX1dBg2n1u8Xw== - -"@types/d3-timer@*": - version "1.0.9" - resolved "https://registry.yarnpkg.com/@types/d3-timer/-/d3-timer-1.0.9.tgz#aed1bde0cf18920d33f5d44839d73de393633fd3" - integrity sha512-WvfJ3LFxBbWjqRGz9n7GJt08RrTHPJDVsIwwoCMROlqF+iDacYiAFjf9oqnq0mXpb2juA2N/qjKP+MKdal3YNQ== - -"@types/d3-transition@*": - version "1.1.6" - resolved "https://registry.yarnpkg.com/@types/d3-transition/-/d3-transition-1.1.6.tgz#7e52da29749d874866cc803fad13925713a372da" - integrity sha512-/F+O2r4oz4G9ATIH3cuSCMGphAnl7VDx7SbENEK0NlI/FE8Jx2oiIrv0uTrpg7yF/AmuWbqp7AGdEHAPIh24Gg== - dependencies: - "@types/d3-selection" "*" - -"@types/d3-voronoi@*": - version "1.1.9" - resolved "https://registry.yarnpkg.com/@types/d3-voronoi/-/d3-voronoi-1.1.9.tgz#7bbc210818a3a5c5e0bafb051420df206617c9e5" - integrity 
sha512-DExNQkaHd1F3dFPvGA/Aw2NGyjMln6E9QzsiqOcBgnE+VInYnFBHBBySbZQts6z6xD+5jTfKCP7M4OqMyVjdwQ== - -"@types/d3-zoom@*": - version "1.7.4" - resolved "https://registry.yarnpkg.com/@types/d3-zoom/-/d3-zoom-1.7.4.tgz#9226ffd2bd3846ec0e4a4e2bff211612d3aafad5" - integrity sha512-5jnFo/itYhJeB2khO/lKe730kW/h2EbKMOvY0uNp3+7NdPm4w63DwPEMxifQZ7n902xGYK5DdU67FmToSoy4VA== - dependencies: - "@types/d3-interpolate" "*" - "@types/d3-selection" "*" - -"@types/d3@5.7.2": - version "5.7.2" - resolved "https://registry.yarnpkg.com/@types/d3/-/d3-5.7.2.tgz#52235eb71a1d3ca171d6dca52a58f5ccbe0254cc" - integrity sha512-7/wClB8ycneWGy3jdvLfXKTd5SoTg9hji7IdJ0RuO9xTY54YpJ8zlcFADcXhY1J3kCBwxp+/1jeN6a5OMwgYOw== - dependencies: - "@types/d3-array" "^1" - "@types/d3-axis" "*" - "@types/d3-brush" "*" - "@types/d3-chord" "*" - "@types/d3-collection" "*" - "@types/d3-color" "*" - "@types/d3-contour" "*" - "@types/d3-dispatch" "*" - "@types/d3-drag" "*" - "@types/d3-dsv" "*" - "@types/d3-ease" "*" - "@types/d3-fetch" "*" - "@types/d3-force" "*" - "@types/d3-format" "*" - "@types/d3-geo" "*" - "@types/d3-hierarchy" "*" - "@types/d3-interpolate" "*" - "@types/d3-path" "*" - "@types/d3-polygon" "*" - "@types/d3-quadtree" "*" - "@types/d3-random" "*" - "@types/d3-scale" "*" - "@types/d3-scale-chromatic" "*" - "@types/d3-selection" "*" - "@types/d3-shape" "*" - "@types/d3-time" "*" - "@types/d3-time-format" "*" - "@types/d3-timer" "*" - "@types/d3-transition" "*" - "@types/d3-voronoi" "*" - "@types/d3-zoom" "*" - -"@types/d3@^3": - version "3.5.43" - resolved "https://registry.yarnpkg.com/@types/d3/-/d3-3.5.43.tgz#e9b4992817e0b6c5efaa7d6e5bb2cee4d73eab58" - integrity sha512-t9ZmXOcpVxywRw86YtIC54g7M9puRh8hFedRvVfHKf5YyOP6pSxA0TvpXpfseXSCInoW4P7bggTrSDiUOs4g5w== - -"@types/dagre-d3@0.4.39": - version "0.4.39" - resolved "https://registry.yarnpkg.com/@types/dagre-d3/-/dagre-d3-0.4.39.tgz#bd5a05c7da366bb6e27e271f3ce1ff7cfc5b5c89" - integrity 
sha512-JZySpfIQPRSTx38B4P5pPGeV4Pqgb0qE/aIiS60qD+j0c3mYP/AHiiwlsrrLCVZWY7OJdvD3dp+aD1HvpjPBzA== - dependencies: - "@types/d3" "^3" - "@types/dagre" "*" - -"@types/dagre@*": - version "0.7.44" - resolved "https://registry.yarnpkg.com/@types/dagre/-/dagre-0.7.44.tgz#8f4b796b118ca29c132da7068fbc0d0351ee5851" - integrity sha512-N6HD+79w77ZVAaVO7JJDW5yJ9LAxM62FpgNGO9xEde+KVYjDRyhIMzfiErXpr1g0JPon9kwlBzoBK6s4fOww9Q== - -"@types/echarts@4.6.0": - version "4.6.0" - resolved "https://registry.yarnpkg.com/@types/echarts/-/echarts-4.6.0.tgz#0dbf5c35db16ed23f9e61370179989fcd62c2678" - integrity sha512-Unz/VUdQ3KwD3vtCh8bI295F3p6rFSApivwGPQJ1Mu6837xeit7C1YuX+75gRwfahotbaazmG8e2rLBNEzrfFg== +"@types/echarts@4.6.1": + version "4.6.1" + resolved "https://registry.yarnpkg.com/@types/echarts/-/echarts-4.6.1.tgz#de6ede6b8069123150d53f3350f9e38533f1970e" + integrity sha512-oeekc7CQASQzRCbx8ixVQw4U4AmnjF2/fEyptqeUYeQnsEubr87Yde8nw6AxWTsEtWSNB/Pp0B/FTgGNHqkAUQ== dependencies: "@types/zrender" "*" @@ -2330,21 +2276,16 @@ "@types/qs" "*" "@types/serve-static" "*" -"@types/faker@4.1.11": - version "4.1.11" - resolved "https://registry.yarnpkg.com/@types/faker/-/faker-4.1.11.tgz#07911f1a39aeeaeec71d8efa0f93ef0eeafd3462" - integrity sha512-iL7khABWgMH53FDfQNYtbFDJXjM3G97KswtyVMUP9XBSt9c+33L1TsXI+mx+EgnoOcuSp12qZae6hLCxGcq7yg== +"@types/faker@4.1.12": + version "4.1.12" + resolved "https://registry.yarnpkg.com/@types/faker/-/faker-4.1.12.tgz#065d37343677df1aa757c622650bd14666c42602" + integrity sha512-0MEyzJrLLs1WaOCx9ULK6FzdCSj2EuxdSP9kvuxxdBEGujZYUOZ4vkPXdgu3dhyg/pOdn7VCatelYX7k0YShlA== "@types/file-saver@2.0.1": version "2.0.1" resolved "https://registry.yarnpkg.com/@types/file-saver/-/file-saver-2.0.1.tgz#e18eb8b069e442f7b956d313f4fadd3ef887354e" integrity sha512-g1QUuhYVVAamfCifK7oB7G3aIl4BbOyzDOqVyUfEr4tfBKrXfeH+M+Tg7HKCXSrbzxYdhyCP7z9WbKo0R2hBCw== -"@types/geojson@*": - version "7946.0.7" - resolved 
"https://registry.yarnpkg.com/@types/geojson/-/geojson-7946.0.7.tgz#c8fa532b60a0042219cdf173ca21a975ef0666ad" - integrity sha512-wE2v81i4C4Ol09RtsWFAqg3BUitWbHSpSlIo+bNdsCJijO9sjme+zm+73ZMCa/qMC8UEERxzGbvmr1cffo2SiQ== - "@types/glob@*", "@types/glob@^7.1.1": version "7.1.1" resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.1.1.tgz#aa59a1c6e3fbc421e07ccd31a944c30eba521575" @@ -2362,7 +2303,7 @@ "@types/react" "*" hoist-non-react-statics "^3.3.0" -"@types/http-proxy@^1.17.3": +"@types/http-proxy@^1.17.4": version "1.17.4" resolved "https://registry.yarnpkg.com/@types/http-proxy/-/http-proxy-1.17.4.tgz#e7c92e3dbe3e13aa799440ff42e6d3a17a9d045b" integrity sha512-IrSHl2u6AWXduUaDLqYpt45tLVCtYv7o4Z0s1KghBCDgIIS9oW5K1H8mZG/A2CfeLdEa7rTd1ACOiHBc1EMT2Q== @@ -2374,10 +2315,10 @@ resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.4.tgz#38fd73ddfd9b55abb1e1b2ed578cb55bd7b7d339" integrity sha512-8+KAKzEvSUdeo+kmqnKrqgeE+LcA0tjYWFY7RPProVYwnqDjukzO+3b6dLD56rYX5TdWejnEOLJYOIeh4CXKuA== -"@types/lodash@4.14.150": - version "4.14.150" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.150.tgz#649fe44684c3f1fcb6164d943c5a61977e8cf0bd" - integrity sha512-kMNLM5JBcasgYscD9x/Gvr6lTAv2NVgsKtet/hm93qMyf/D1pt+7jeEZklKJKxMVmXjxbRVQQGfqDSfipYCO6w== +"@types/lodash@4.14.155": + version "4.14.155" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.155.tgz#e2b4514f46a261fd11542e47519c20ebce7bc23a" + integrity sha512-vEcX7S7aPhsBCivxMwAANQburHBtfN9RdyXFk84IJmu2Z4Hkg1tOFgaslRiEqqvoLtbCBi6ika1EMspE+NZ9Lg== "@types/memory-fs@*": version "0.3.2" @@ -2407,14 +2348,14 @@ integrity sha1-aaI6OtKcrwCX8G7aWbNh7i8GOfY= "@types/node@*", "@types/node@>= 8": - version "14.0.1" - resolved "https://registry.yarnpkg.com/@types/node/-/node-14.0.1.tgz#5d93e0a099cd0acd5ef3d5bde3c086e1f49ff68c" - integrity sha512-FAYBGwC+W6F9+huFIDtn43cpy7+SzG+atzRiTfdp3inUKL2hXnd4rG8hylJLIh4+hqrQy1P17kvJByE/z825hA== + version "14.0.5" + resolved 
"https://registry.yarnpkg.com/@types/node/-/node-14.0.5.tgz#3d03acd3b3414cf67faf999aed11682ed121f22b" + integrity sha512-90hiq6/VqtQgX8Sp0EzeIsv3r+ellbGj4URKj5j30tLlZvRUpnAe9YbYnjl3pJM93GyXU0tghHhvXHq+5rnCKA== -"@types/node@13.13.5": - version "13.13.5" - resolved "https://registry.yarnpkg.com/@types/node/-/node-13.13.5.tgz#96ec3b0afafd64a4ccea9107b75bf8489f0e5765" - integrity sha512-3ySmiBYJPqgjiHA7oEaIo2Rzz0HrOZ7yrNO5HWyaE5q0lQ3BppDZ3N53Miz8bw2I7gh1/zir2MGVZBvpb1zq9g== +"@types/node@14.0.10": + version "14.0.10" + resolved "https://registry.yarnpkg.com/@types/node/-/node-14.0.10.tgz#dbfaa170bd9eafccccb6d7060743a761b0844afd" + integrity sha512-Bz23oN/5bi0rniKT24ExLf4cK0JdvN3dH/3k0whYkdN4eI4vS2ZW/2ENNn2uxHCzWcbdHIa/GRuWQytfzCjRYw== "@types/normalize-package-data@^2.4.0": version "2.4.0" @@ -2437,35 +2378,35 @@ integrity sha512-KfRL3PuHmqQLOG+2tGpRO26Ctg+Cq1E01D2DMriKEATHgWLfeNDmq9e29Q9WIky0dQ3NPkd1mzYH8Lm936Z9qw== "@types/q@^1.5.1": - version "1.5.2" - resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.2.tgz#690a1475b84f2a884fd07cd797c00f5f31356ea8" - integrity sha512-ce5d3q03Ex0sy4R14722Rmt6MT07Ua+k4FwDfdcToYJcMKNtRVQvJ6JCAPdAmAnbRb6CsX6aYb9m96NGod9uTw== + version "1.5.4" + resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.4.tgz#15925414e0ad2cd765bfef58842f7e26a7accb24" + integrity sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug== "@types/qs@*": - version "6.9.2" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.2.tgz#faab98ec4f96ee72c829b7ec0983af4f4d343113" - integrity sha512-a9bDi4Z3zCZf4Lv1X/vwnvbbDYSNz59h3i3KdyuYYN+YrLjSeJD0dnphdULDfySvUv6Exy/O0K6wX/kQpnPQ+A== + version "6.9.3" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.3.tgz#b755a0934564a200d3efdf88546ec93c369abd03" + integrity sha512-7s9EQWupR1fTc2pSMtXRQ9w9gLOcrJn+h7HOXw4evxyvVqMi4f+q7d2tnFe3ng3SNHjtK+0EzGMGFUQX4/AQRA== "@types/range-parser@*": version "1.2.3" resolved 
"https://registry.yarnpkg.com/@types/range-parser/-/range-parser-1.2.3.tgz#7ee330ba7caafb98090bece86a5ee44115904c2c" integrity sha512-ewFXqrQHlFsgc09MK5jP5iR7vumV/BYayNC6PgJO2LPe8vrnNFyjQjSppfEngITi0qvfKtzFvgKymGheFM9UOA== -"@types/react-dom@16.9.7": - version "16.9.7" - resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-16.9.7.tgz#60844d48ce252d7b2dccf0c7bb937130e27c0cd2" - integrity sha512-GHTYhM8/OwUCf254WO5xqR/aqD3gC9kSTLpopWGpQLpnw23jk44RvMHsyUSEplvRJZdHxhJGMMLF0kCPYHPhQA== +"@types/react-dom@16.9.8": + version "16.9.8" + resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-16.9.8.tgz#fe4c1e11dfc67155733dfa6aa65108b4971cb423" + integrity sha512-ykkPQ+5nFknnlU6lDd947WbQ6TE3NNzbQAkInC2EKY1qeYdTKp7onFusmYZb+ityzx2YviqT6BXSu+LyWWJwcA== dependencies: "@types/react" "*" "@types/react-native@*": - version "0.62.9" - resolved "https://registry.yarnpkg.com/@types/react-native/-/react-native-0.62.9.tgz#f75d4a8879e68ed3857d6f2f73dc0752a0505362" - integrity sha512-OcoE7SKz1PsvTGJK5fIwJu6kWdDFN+hH1vMI4GVTEBYhV5FAM5vKVUFCaSiEPJScyNyIEWAeQwFvI3a01+Grzg== + version "0.62.11" + resolved "https://registry.yarnpkg.com/@types/react-native/-/react-native-0.62.11.tgz#12a19fbdd9709bcf2646f677f5ac49d4d474252b" + integrity sha512-hRJSROGw+3JIp2w4WAAA+/4YM/HApeOQul7FVxOzLduaMKV/YZnm+1bfkS7hhKp9JqlbFNgqoRY/p2Ut7AD47g== dependencies: "@types/react" "*" -"@types/react@*": +"@types/react@*", "@types/react@16.9.35": version "16.9.35" resolved "https://registry.yarnpkg.com/@types/react/-/react-16.9.35.tgz#a0830d172e8aadd9bd41709ba2281a3124bbd368" integrity sha512-q0n0SsWcGc8nDqH2GJfWQWUOmZSJhXV64CjVN5SvcNti3TdEaA3AH0D8DwNmMdzjMAC/78tB8nAZIlV8yTz+zQ== @@ -2473,14 +2414,6 @@ "@types/prop-types" "*" csstype "^2.2.0" -"@types/react@16.9.34": - version "16.9.34" - resolved "https://registry.yarnpkg.com/@types/react/-/react-16.9.34.tgz#f7d5e331c468f53affed17a8a4d488cd44ea9349" - integrity 
sha512-8AJlYMOfPe1KGLKyHpflCg5z46n0b5DbRfqDksxBLBTUpB75ypDBAO9eCUcjNwE6LCUslwTz00yyG/X9gaVtow== - dependencies: - "@types/prop-types" "*" - csstype "^2.2.0" - "@types/rimraf@3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@types/rimraf/-/rimraf-3.0.0.tgz#b9d03f090ece263671898d57bb7bb007023ac19f" @@ -2490,17 +2423,17 @@ "@types/node" "*" "@types/serve-static@*": - version "1.13.3" - resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.13.3.tgz#eb7e1c41c4468272557e897e9171ded5e2ded9d1" - integrity sha512-oprSwp094zOglVrXdlo/4bAHtKTAxX6VT8FOZlBKrmyLbNvE1zxZyJ6yikMVtHIvwP45+ZQGJn+FdXGKTozq0g== + version "1.13.4" + resolved "https://registry.yarnpkg.com/@types/serve-static/-/serve-static-1.13.4.tgz#6662a93583e5a6cabca1b23592eb91e12fa80e7c" + integrity sha512-jTDt0o/YbpNwZbQmE/+2e+lfjJEJJR0I3OFaKQKPWkASkCoW3i6fsUnqudSMcNAfbtmADGu8f4MV4q+GqULmug== dependencies: "@types/express-serve-static-core" "*" "@types/mime" "*" -"@types/shelljs@0.8.7": - version "0.8.7" - resolved "https://registry.yarnpkg.com/@types/shelljs/-/shelljs-0.8.7.tgz#a2a606b185165abadf8b7995fea5e326e637088e" - integrity sha512-Mg2qGjLIJIieeJ1/NjswAOY9qXDShLeh6JwpD1NZsvUvI0hxdUCNDpnBXv9YQeugKi2EHU+BqkbUE4jpY4GKmQ== +"@types/shelljs@0.8.8": + version "0.8.8" + resolved "https://registry.yarnpkg.com/@types/shelljs/-/shelljs-0.8.8.tgz#e439c69929b88a2c8123c1a55e09eb708315addf" + integrity sha512-lD3LWdg6j8r0VRBFahJVaxoW0SIcswxKaFUrmKl33RJVeeoNYQAz4uqCJ5Z6v4oIBOsC5GozX+I5SorIKiTcQA== dependencies: "@types/glob" "*" "@types/node" "*" @@ -2526,16 +2459,16 @@ integrity sha512-/gG2M/Imw7cQFp8PGvz/SwocNrmKFjFsm5Pb8HdbHkZ1K8pmuPzOX4VeVoiEecFCVf4CsN1r3/BRvx+6sNqwtQ== "@types/uglify-js@*": - version "3.9.1" - resolved "https://registry.yarnpkg.com/@types/uglify-js/-/uglify-js-3.9.1.tgz#0ad39d6a72979593f669acdfc7e980d590d3fb94" - integrity sha512-rdBIeMQyRBOXogop/EYBvSkYFn9D9yGxUa5hagBVG55KIdSUbp22EACJSHCs6kmmfunojAhf7zJH+Ds06/qLaQ== + version "3.9.2" + resolved 
"https://registry.yarnpkg.com/@types/uglify-js/-/uglify-js-3.9.2.tgz#01992579debba674e1e359cd6bcb1a1d0ab2e02b" + integrity sha512-d6dIfpPbF+8B7WiCi2ELY7m0w1joD8cRW4ms88Emdb2w062NeEpbNCeWwVCgzLRpVG+5e74VFSg4rgJ2xXjEiQ== dependencies: source-map "^0.6.1" -"@types/webpack-dev-middleware@3.7.0": - version "3.7.0" - resolved "https://registry.yarnpkg.com/@types/webpack-dev-middleware/-/webpack-dev-middleware-3.7.0.tgz#3f90ddf22aef2c592eae528c08739bc4807a7d62" - integrity sha512-qBJ0+FXarzHwcnIYEc5W41LxC0r74kUIkjCbYBTXq4eKjuFSbt5tSohU5sZYKNnsmHteLJ73Fg+bD4oECCcJxg== +"@types/webpack-dev-middleware@3.7.1": + version "3.7.1" + resolved "https://registry.yarnpkg.com/@types/webpack-dev-middleware/-/webpack-dev-middleware-3.7.1.tgz#2d4e7d9abf6eec2988476f9f8f1e9b7acb2a7f82" + integrity sha512-+U6zP6/jlQ9Mw4zBOiuKOe/delLS4f0kvzAJCVK9wsW00hi4TD9u6TIaE5x5xy6xrkCiXMSf56bsAosORjP3/Q== dependencies: "@types/connect" "*" "@types/memory-fs" "*" @@ -2563,10 +2496,10 @@ "@types/webpack-sources" "*" source-map "^0.6.0" -"@types/webpack@4.41.12": - version "4.41.12" - resolved "https://registry.yarnpkg.com/@types/webpack/-/webpack-4.41.12.tgz#0386ee2a2814368e2f2397abb036c0bf173ff6c3" - integrity sha512-BpCtM4NnBen6W+KEhrL9jKuZCXVtiH6+0b6cxdvNt2EwU949Al334PjQSl2BeAyvAX9mgoNNG21wvjP3xZJJ5w== +"@types/webpack@4.41.17": + version "4.41.17" + resolved "https://registry.yarnpkg.com/@types/webpack/-/webpack-4.41.17.tgz#0a69005e644d657c85b7d6ec1c826a71bebd1c93" + integrity sha512-6FfeCidTSHozwKI67gIVQQ5Mp0g4X96c2IXxX75hYEQJwST/i6NyZexP//zzMOBb+wG9jJ7oO8fk9yObP2HWAw== dependencies: "@types/anymatch" "*" "@types/node" "*" @@ -2580,10 +2513,10 @@ resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-15.0.0.tgz#cb3f9f741869e20cce330ffbeb9271590483882d" integrity sha512-FA/BWv8t8ZWJ+gEOnLLd8ygxH/2UFbAvgEonyfN6yWGLKc7zVjbpl2Y4CTjid9h2RfgPP6SEt6uHwEOply00yw== -"@types/yargs@15.0.4": - version "15.0.4" - resolved 
"https://registry.yarnpkg.com/@types/yargs/-/yargs-15.0.4.tgz#7e5d0f8ca25e9d5849f2ea443cf7c402decd8299" - integrity sha512-9T1auFmbPZoxHz0enUFlUuKRy3it01R+hlggyVUMtnCTQRunsQYifnSGb8hET4Xo8yiC0o0r1paW3ud5+rbURg== +"@types/yargs@15.0.5": + version "15.0.5" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-15.0.5.tgz#947e9a6561483bdee9adffc983e91a6902af8b79" + integrity sha512-Dk/IDOPtOgubt/IaevIUbTgV7doaKkoorvOyYM2CMwuDyP89bekI7H4xLIwunNYiK9jhCkmc6pUrJk3cj2AB9w== dependencies: "@types/yargs-parser" "*" @@ -2592,47 +2525,48 @@ resolved "https://registry.yarnpkg.com/@types/zrender/-/zrender-4.0.0.tgz#a6806f12ec4eccaaebd9b0d816f049aca6188fbd" integrity sha512-s89GOIeKFiod2KSqHkfd2rzx+T2DVu7ihZCBEBnhFrzvQPUmzvDSBot9Fi1DfMQm9Odg+rTqoMGC38RvrwJK2w== -"@typescript-eslint/eslint-plugin@2.31.0": - version "2.31.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-2.31.0.tgz#942c921fec5e200b79593c71fafb1e3f57aa2e36" - integrity sha512-iIC0Pb8qDaoit+m80Ln/aaeu9zKQdOLF4SHcGLarSeY1gurW6aU4JsOPMjKQwXlw70MvWKZQc6S2NamA8SJ/gg== +"@typescript-eslint/eslint-plugin@3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-3.1.0.tgz#4ac00ecca3bbea740c577f1843bc54fa69c3def2" + integrity sha512-D52KwdgkjYc+fmTZKW7CZpH5ZBJREJKZXRrveMiRCmlzZ+Rw9wRVJ1JAmHQ9b/+Ehy1ZeaylofDB9wwXUt83wg== dependencies: - "@typescript-eslint/experimental-utils" "2.31.0" + "@typescript-eslint/experimental-utils" "3.1.0" functional-red-black-tree "^1.0.1" regexpp "^3.0.0" + semver "^7.3.2" tsutils "^3.17.1" -"@typescript-eslint/experimental-utils@2.31.0": - version "2.31.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/experimental-utils/-/experimental-utils-2.31.0.tgz#a9ec514bf7fd5e5e82bc10dcb6a86d58baae9508" - integrity sha512-MI6IWkutLYQYTQgZ48IVnRXmLR/0Q6oAyJgiOror74arUMh7EWjJkADfirZhRsUMHeLJ85U2iySDwHTSnNi9vA== +"@typescript-eslint/experimental-utils@3.1.0": + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/experimental-utils/-/experimental-utils-3.1.0.tgz#2d5dba7c2ac2a3da3bfa3f461ff64de38587a872" + integrity sha512-Zf8JVC2K1svqPIk1CB/ehCiWPaERJBBokbMfNTNRczCbQSlQXaXtO/7OfYz9wZaecNvdSvVADt6/XQuIxhC79w== dependencies: "@types/json-schema" "^7.0.3" - "@typescript-eslint/typescript-estree" "2.31.0" + "@typescript-eslint/typescript-estree" "3.1.0" eslint-scope "^5.0.0" eslint-utils "^2.0.0" -"@typescript-eslint/parser@2.31.0": - version "2.31.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-2.31.0.tgz#beddd4e8efe64995108b229b2862cd5752d40d6f" - integrity sha512-uph+w6xUOlyV2DLSC6o+fBDzZ5i7+3/TxAsH4h3eC64tlga57oMb96vVlXoMwjR/nN+xyWlsnxtbDkB46M2EPQ== +"@typescript-eslint/parser@3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-3.1.0.tgz#9c02ba5d88ad2355672f39e6cd4176f172dd47f8" + integrity sha512-NcDSJK8qTA2tPfyGiPes9HtVKLbksmuYjlgGAUs7Ld2K0swdWibnCq9IJx9kJN8JJdgUJSorFiGaPHBgH81F/Q== dependencies: "@types/eslint-visitor-keys" "^1.0.0" - "@typescript-eslint/experimental-utils" "2.31.0" - "@typescript-eslint/typescript-estree" "2.31.0" + "@typescript-eslint/experimental-utils" "3.1.0" + "@typescript-eslint/typescript-estree" "3.1.0" eslint-visitor-keys "^1.1.0" -"@typescript-eslint/typescript-estree@2.31.0": - version "2.31.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-2.31.0.tgz#ac536c2d46672aa1f27ba0ec2140d53670635cfd" - integrity sha512-vxW149bXFXXuBrAak0eKHOzbcu9cvi6iNcJDzEtOkRwGHxJG15chiAQAwhLOsk+86p9GTr/TziYvw+H9kMaIgA== +"@typescript-eslint/typescript-estree@3.1.0": + version "3.1.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-3.1.0.tgz#eaff52d31e615e05b894f8b9d2c3d8af152a5dd2" + integrity sha512-+4nfYauqeQvK55PgFrmBWFVYb6IskLyOosYEmhH3mSVhfBp9AIJnjExdgDmKWoOBHRcPM8Ihfm2BFpZf0euUZQ== dependencies: debug "^4.1.1" eslint-visitor-keys 
"^1.1.0" glob "^7.1.6" is-glob "^4.0.1" lodash "^4.17.15" - semver "^6.3.0" + semver "^7.3.2" tsutils "^3.17.1" "@webassemblyjs/ast@1.9.0": @@ -2895,6 +2829,14 @@ ajv@^6.1.0, ajv@^6.10.0, ajv@^6.10.2, ajv@^6.12.0, ajv@^6.5.5: json-schema-traverse "^0.4.1" uri-js "^4.2.2" +ally.js@1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/ally.js/-/ally.js-1.4.1.tgz#9fb7e6ba58efac4ee9131cb29aa9ee3b540bcf1e" + integrity sha1-n7fmuljvrE7pExyymqnuO1QLzx4= + dependencies: + css.escape "^1.5.0" + platform "1.3.3" + alphanum-sort@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3" @@ -2912,6 +2854,11 @@ amp@0.3.1, amp@~0.3.1: resolved "https://registry.yarnpkg.com/amp/-/amp-0.3.1.tgz#6adf8d58a74f361e82c1fa8d389c079e139fc47d" integrity sha1-at+NWKdPNh6CwfqNOJwHnhOfxH0= +anser@1.4.9: + version "1.4.9" + resolved "https://registry.yarnpkg.com/anser/-/anser-1.4.9.tgz#1f85423a5dcf8da4631a341665ff675b96845760" + integrity sha512-AI+BjTeGt2+WFk4eWcqbQ7snZpDBt8SaLlj0RT2h5xfdWaiy51OjYvqwMrNzJLGy8iOAL6nKDITWO+rd4MkYEA== + ansi-align@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-3.0.0.tgz#b536b371cf687caaef236c18d3e21fe3797467cb" @@ -3235,9 +3182,9 @@ aws-sign2@~0.7.0: integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= aws4@^1.8.0: - version "1.9.1" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.9.1.tgz#7e33d8f7d449b3f673cd72deb9abdc552dbe528e" - integrity sha512-wMHVg2EOHaMRxbzgFJ9gtjOOCrI80OHLG14rxi28XwOW8ux6IiEbRCGGGqCtdAIg4FQCbW20k9RsT4y3gJlFug== + version "1.10.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.10.0.tgz#a17b3a8ea811060e74d47d306122400ad4497ae2" + integrity sha512-3YDiu347mtVtjpyV3u5kVqQLP242c06zwDOgpeRnybmXlYYsLbtTrUBUm8i8srONt+FWobl5aibnU1030PeeuA== axios@^0.19.0: version "0.19.2" @@ -3255,7 +3202,7 @@ babel-code-frame@^6.22.0: esutils "^2.0.2" js-tokens "^3.0.2" -babel-plugin-dynamic-import-node@^2.3.0, 
babel-plugin-dynamic-import-node@^2.3.3: +babel-plugin-dynamic-import-node@^2.3.3: version "2.3.3" resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz#84fda19c976ec5c6defef57f9427b3def66e17a3" integrity sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ== @@ -3409,14 +3356,14 @@ bluebird@^3.5.1, bluebird@^3.5.3, bluebird@^3.5.5: integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.4.0: - version "4.11.8" - resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" - integrity sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA== + version "4.11.9" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.9.tgz#26d556829458f9d1e81fc48952493d0ba3507828" + integrity sha512-E6QoYqCKZfgatHTdHzs1RRKP7ip4vvm+EyRUeE2RF0NblwVvb0p6jSVeNTOFxPn26QXN2o6SMfNxKp6kU8zQaw== bn.js@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.1.1.tgz#48efc4031a9c4041b9c99c6941d903463ab62eb5" - integrity sha512-IUTD/REb78Z2eodka1QZyyEk66pciRcP6Sroka0aI3tG/iwIdYLrBD62RsubR7vqdt3WyX8p4jxeatzmRSphtA== + version "5.1.2" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.1.2.tgz#c9686902d3c9a27729f43ab10f9d79c2004da7b0" + integrity sha512-40rZaf3bUNKTVYu9sIeeEGOg7g14Yvnj9kH7b50EiwX0Q7A6umbvfI5tvHaOERH0XigqKkfLkFQxzb4e6CIXnA== body-parser@1.19.0: version "1.19.0" @@ -3534,9 +3481,9 @@ browserify-rsa@^4.0.0, browserify-rsa@^4.0.1: randombytes "^2.0.1" browserify-sign@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.1.0.tgz#4fe971b379a5aeb4925e06779f9fa1f41d249d70" - integrity sha512-VYxo7cDCeYUoBZ0ZCy4UyEUCP3smyBd4DRQM5nrFS1jJjPJjX7rP3oLRpPoWfkhQfyJ0I9ZbHbKafrFD/SGlrg== + version "4.2.0" + resolved 
"https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.2.0.tgz#545d0b1b07e6b2c99211082bf1b12cce7a0b0e11" + integrity sha512-hEZC1KEeYuoHRqhGhTy6gWrpJA3ZDjFWv0DE61643ZnOXAKJb3u7yWcrU0mMc9SwAqK1n7myPGndkp0dFG7NFA== dependencies: bn.js "^5.1.1" browserify-rsa "^4.0.1" @@ -3546,6 +3493,7 @@ browserify-sign@^4.0.0: inherits "^2.0.4" parse-asn1 "^5.1.5" readable-stream "^3.6.0" + safe-buffer "^5.2.0" browserify-zlib@^0.2.0: version "0.2.0" @@ -3554,16 +3502,7 @@ browserify-zlib@^0.2.0: dependencies: pako "~1.0.5" -browserslist@4.8.3: - version "4.8.3" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.8.3.tgz#65802fcd77177c878e015f0e3189f2c4f627ba44" - integrity sha512-iU43cMMknxG1ClEZ2MDKeonKE1CCrFVkQK2AqO2YWFmvIrx4JWrvQ4w4hQez6EpVI8rHTtqh/ruHHDHSOKxvUg== - dependencies: - caniuse-lite "^1.0.30001017" - electron-to-chromium "^1.3.322" - node-releases "^1.1.44" - -browserslist@^4.0.0, browserslist@^4.6.0, browserslist@^4.8.5: +browserslist@4.12.0, browserslist@^4.0.0, browserslist@^4.11.1, browserslist@^4.8.5: version "4.12.0" resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.12.0.tgz#06c6d5715a1ede6c51fc39ff67fd647f740b656d" integrity sha512-UH2GkcEDSI0k/lRkuDSzFl9ZZ87skSy9w2XAn1MsZnL+4c4rqbBd3e82UWHbYDpztABrPBhZsTEeuxVfHppqDg== @@ -3578,7 +3517,7 @@ btoa-lite@^1.0.0: resolved "https://registry.yarnpkg.com/btoa-lite/-/btoa-lite-1.0.0.tgz#337766da15801210fdd956c22e9c6891ab9d0337" integrity sha1-M3dm2hWAEhD92VbCLpxokaudAzc= -buffer-from@^1.0.0: +buffer-from@^1.0.0, buffer-from@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== @@ -3622,6 +3561,30 @@ bytes@3.1.0: resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6" integrity 
sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg== +cacache@13.0.1: + version "13.0.1" + resolved "https://registry.yarnpkg.com/cacache/-/cacache-13.0.1.tgz#a8000c21697089082f85287a1aec6e382024a71c" + integrity sha512-5ZvAxd05HDDU+y9BVvcqYu2LLXmPnQ0hW62h32g4xBTgL/MppR4/04NHfj/ycM2y6lmTnbw6HVi+1eN0Psba6w== + dependencies: + chownr "^1.1.2" + figgy-pudding "^3.5.1" + fs-minipass "^2.0.0" + glob "^7.1.4" + graceful-fs "^4.2.2" + infer-owner "^1.0.4" + lru-cache "^5.1.1" + minipass "^3.0.0" + minipass-collect "^1.0.2" + minipass-flush "^1.0.5" + minipass-pipeline "^1.2.2" + mkdirp "^0.5.1" + move-concurrently "^1.0.1" + p-map "^3.0.0" + promise-inflight "^1.0.1" + rimraf "^2.7.1" + ssri "^7.0.0" + unique-filename "^1.1.1" + cacache@^12.0.0, cacache@^12.0.2, cacache@^12.0.3: version "12.0.4" resolved "https://registry.yarnpkg.com/cacache/-/cacache-12.0.4.tgz#668bcbd105aeb5f1d92fe25570ec9525c8faa40c" @@ -3766,17 +3729,17 @@ caniuse-api@^3.0.0: lodash.memoize "^4.1.2" lodash.uniq "^4.5.0" -caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001017, caniuse-lite@^1.0.30001043: - version "1.0.30001059" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001059.tgz#7bff0613d94b6ea41cb5c864c966d340f8ae6d34" - integrity sha512-oOrc+jPJWooKIA0IrNZ5sYlsXc7NP7KLhNWrSGEJhnfSzDvDJ0zd3i6HXsslExY9bbu+x0FQ5C61LcqmPt7bOQ== +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001043: + version "1.0.30001064" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001064.tgz#a0f49689119ba08943b09968e118faf3f645add0" + integrity sha512-hdBcQMFvJIrOhkpAZiRXz04Cmetwc9NekeuNl0qZfHOugxOhJKxsjF1RmISMPFjIF4PPx1reliIzbfN42EiQ5A== caseless@~0.12.0: version "0.12.0" resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= -chalk@2.4.2, chalk@^2.0.0, chalk@^2.1.0, chalk@^2.3.0, chalk@^2.3.1, chalk@^2.4.1, chalk@^2.4.2: +chalk@2.4.2, 
chalk@^2.0.0, chalk@^2.3.0, chalk@^2.3.1, chalk@^2.4.1, chalk@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== @@ -3793,6 +3756,14 @@ chalk@3.0.0, chalk@^3.0.0, chalk@~3.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" +chalk@4.0.0, chalk@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.0.0.tgz#6e98081ed2d17faab615eb52ac66ec1fe6209e72" + integrity sha512-N9oWFcegS0sFr9oh1oz2d7Npos6vNoWW9HvtCg5N1KRFpUhaAhvTv5Y58g880fZaEYSNm3qDz8SU1UrGvp+n7A== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + chalk@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" @@ -3804,14 +3775,6 @@ chalk@^1.1.3: strip-ansi "^3.0.0" supports-color "^2.0.0" -chalk@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.0.0.tgz#6e98081ed2d17faab615eb52ac66ec1fe6209e72" - integrity sha512-N9oWFcegS0sFr9oh1oz2d7Npos6vNoWW9HvtCg5N1KRFpUhaAhvTv5Y58g880fZaEYSNm3qDz8SU1UrGvp+n7A== - dependencies: - ansi-styles "^4.1.0" - supports-color "^7.1.0" - chardet@^0.7.0: version "0.7.0" resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" @@ -3822,22 +3785,7 @@ charm@~0.1.1: resolved "https://registry.yarnpkg.com/charm/-/charm-0.1.2.tgz#06c21eed1a1b06aeb67553cdc53e23274bac2296" integrity sha1-BsIe7RobBq62dVPNxT4jJ0usIpY= -chokidar@3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.3.0.tgz#12c0714668c55800f659e262d4962a97faf554a6" - integrity sha512-dGmKLDdT3Gdl7fBUe8XK+gAtGmzy5Fn0XkkWQuYxGIgWVPPse2CxFA5mtrlD0TOHaHjEUqkWNyP1XdHoJES/4A== - dependencies: - anymatch "~3.1.1" - braces "~3.0.2" - glob-parent "~5.1.0" - is-binary-path "~2.1.0" - is-glob "~4.0.1" - normalize-path "~3.0.0" - 
readdirp "~3.2.0" - optionalDependencies: - fsevents "~2.1.1" - -chokidar@^2.1.8: +chokidar@2.1.8, chokidar@^2.1.8: version "2.1.8" resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-2.1.8.tgz#804b3a7b6a99358c3c5c61e71d8728f041cff917" integrity sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg== @@ -3856,7 +3804,22 @@ chokidar@^2.1.8: optionalDependencies: fsevents "^1.2.7" -chokidar@^3.2.2, chokidar@^3.3.0: +chokidar@3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.3.0.tgz#12c0714668c55800f659e262d4962a97faf554a6" + integrity sha512-dGmKLDdT3Gdl7fBUe8XK+gAtGmzy5Fn0XkkWQuYxGIgWVPPse2CxFA5mtrlD0TOHaHjEUqkWNyP1XdHoJES/4A== + dependencies: + anymatch "~3.1.1" + braces "~3.0.2" + glob-parent "~5.1.0" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.2.0" + optionalDependencies: + fsevents "~2.1.1" + +chokidar@^3.2.2, chokidar@^3.3.0, chokidar@^3.4.0: version "3.4.0" resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.4.0.tgz#b30611423ce376357c765b9b8f904b9fba3c0be8" integrity sha512-aXAaho2VJtisB/1fg1+3nlLJqGOuewTzQpd/Tz0yTg2R0e4IGtshYvtjowyEumcBv2z+y4+kc75Mz7j5xJskcQ== @@ -3906,6 +3869,11 @@ class-utils@^0.3.5: isobject "^3.0.0" static-extend "^0.1.1" +classnames@2.2.6: + version "2.2.6" + resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce" + integrity sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q== + claygl@^1.2.1: version "1.3.0" resolved "https://registry.yarnpkg.com/claygl/-/claygl-1.3.0.tgz#7a6e2903210519ac358848f5d78070ed211685f3" @@ -3948,7 +3916,7 @@ cli-tableau@^2.0.0: chalk "3.0.0" mocha "^7.1.1" -cli-truncate@^2.1.0: +cli-truncate@2.1.0, cli-truncate@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/cli-truncate/-/cli-truncate-2.1.0.tgz#c39e28bf05edcde5be3b98992a22deed5a2b93c7" integrity 
sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg== @@ -4082,17 +4050,17 @@ combined-stream@^1.0.6, combined-stream@~1.0.6: dependencies: delayed-stream "~1.0.0" -commander@2, commander@^2.20.0, commander@~2.20.3: - version "2.20.3" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" - integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== - commander@2.15.1: version "2.15.1" resolved "https://registry.yarnpkg.com/commander/-/commander-2.15.1.tgz#df46e867d0fc2aec66a34662b406a9ccafff5b0f" integrity sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag== -commander@^5.0.0: +commander@^2.20.0, commander@~2.20.3: + version "2.20.3" + resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== + +commander@^5.1.0: version "5.1.0" resolved "https://registry.yarnpkg.com/commander/-/commander-5.1.0.tgz#46abbd1652f8e059bddaef99bbdcb2ad9cf179ae" integrity sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg== @@ -4103,9 +4071,9 @@ commondir@^1.0.1: integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs= compare-func@^1.3.1: - version "1.3.2" - resolved "https://registry.yarnpkg.com/compare-func/-/compare-func-1.3.2.tgz#99dd0ba457e1f9bc722b12c08ec33eeab31fa648" - integrity sha1-md0LpFfh+bxyKxLAjsM+6rMfpkg= + version "1.3.4" + resolved "https://registry.yarnpkg.com/compare-func/-/compare-func-1.3.4.tgz#6b07c4c5e8341119baf44578085bda0f4a823516" + integrity sha512-sq2sWtrqKPkEXAC8tEJA1+BqAH9GbFkGBtUOqrUX57VSfwp8xyktctk+uLoRy5eccTdxzDcVIztlYDpKs3Jv1Q== dependencies: array-ify "^1.0.0" dot-prop "^3.0.0" @@ -4329,7 +4297,7 @@ copy-descriptor@^0.1.0: resolved 
"https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" integrity sha1-Z29us8OZl8LuGsOpJP1hJHSPV40= -core-js-compat@^3.1.1: +core-js-compat@^3.6.2: version "3.6.5" resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.6.5.tgz#2a51d9a4e25dfd6e690251aa81f99e3c05481f1c" integrity sha512-7ItTKOhOZbznhXAQ2g/slGg1PJV5zDO/WdkTwi7UEOJmkvsE32PWvx6mKtDjiMpjnR2CNf6BAD6sSxIlv7ptng== @@ -4426,7 +4394,7 @@ cross-fetch@3.0.4: node-fetch "2.6.0" whatwg-fetch "3.0.0" -cross-spawn@6.0.5, cross-spawn@^6.0.0, cross-spawn@^6.0.5: +cross-spawn@6.0.5, cross-spawn@^6.0.0: version "6.0.5" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== @@ -4437,7 +4405,7 @@ cross-spawn@6.0.5, cross-spawn@^6.0.0, cross-spawn@^6.0.5: shebang-command "^1.2.0" which "^1.2.9" -cross-spawn@^7.0.0, cross-spawn@^7.0.1: +cross-spawn@^7.0.0, cross-spawn@^7.0.1, cross-spawn@^7.0.2: version "7.0.2" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.2.tgz#d0d7dcfa74e89115c7619f4f721a94e1fdb716d6" integrity sha512-PD6G8QG3S4FK/XCGFbEQrDqO2AnMMsy0meR7lerlIOHAAbkuavGU/pOqprrlvfTNjvowivTeBsjebAL0NSoMxw== @@ -4550,6 +4518,11 @@ css-what@^3.2.1: resolved "https://registry.yarnpkg.com/css-what/-/css-what-3.2.1.tgz#f4a8f12421064621b456755e34a03a2c22df5da1" integrity sha512-WwOrosiQTvyms+Ti5ZC5vGEK0Vod3FTt1ca+payZqvKuGJF+dq7bG63DstxtN0dpm6FxY27a/zS3Wten+gEtGw== +css.escape@^1.5.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/css.escape/-/css.escape-1.5.1.tgz#42e27d4fa04ae32f931a4b4d4191fa9cddee97cb" + integrity sha1-QuJ9T6BK4y+TGktNQZH6nN3ul8s= + css@^2.0.0: version "2.2.4" resolved "https://registry.yarnpkg.com/css/-/css-2.2.4.tgz#c646755c73971f2bba6a601e2cf2fd71b1298929" @@ -4606,304 +4579,56 @@ cssnano-util-get-arguments@^4.0.0: resolved 
"https://registry.yarnpkg.com/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz#ed3a08299f21d75741b20f3b81f194ed49cc150f" integrity sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8= -cssnano-util-get-match@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz#c0e4ca07f5386bb17ec5e52250b4f5961365156d" - integrity sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0= - -cssnano-util-raw-cache@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz#b26d5fd5f72a11dfe7a7846fb4c67260f96bf282" - integrity sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA== - dependencies: - postcss "^7.0.0" - -cssnano-util-same-parent@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz#574082fb2859d2db433855835d9a8456ea18bbf3" - integrity sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q== - -cssnano@4.1.10: - version "4.1.10" - resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-4.1.10.tgz#0ac41f0b13d13d465487e111b778d42da631b8b2" - integrity sha512-5wny+F6H4/8RgNlaqab4ktc3e0/blKutmq8yNlBFXA//nSFFAqAngjNVRzUvCgYROULmZZUoosL/KSoZo5aUaQ== - dependencies: - cosmiconfig "^5.0.0" - cssnano-preset-default "^4.0.7" - is-resolvable "^1.0.0" - postcss "^7.0.0" - -csso@^4.0.2: - version "4.0.3" - resolved "https://registry.yarnpkg.com/csso/-/csso-4.0.3.tgz#0d9985dc852c7cc2b2cacfbbe1079014d1a8e903" - integrity sha512-NL3spysxUkcrOgnpsT4Xdl2aiEiBG6bXswAABQVHcMrfjjBisFOKwLDOmf4wf32aPdcJws1zds2B0Rg+jqMyHQ== - dependencies: - css-tree "1.0.0-alpha.39" - -csstype@^2.2.0, csstype@^2.5.7: - version "2.6.10" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.10.tgz#e63af50e66d7c266edb6b32909cfd0aabe03928b" - integrity 
sha512-D34BqZU4cIlMCY93rZHbrq9pjTAQJ3U8S8rfBqjwHxkGPThWFjzZDQpgMJY0QViLxth6ZKYiwFBo14RdN44U/w== - -currently-unhandled@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea" - integrity sha1-mI3zP+qxke95mmE2nddsF635V+o= - dependencies: - array-find-index "^1.0.1" - -cyclist@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-1.0.1.tgz#596e9698fd0c80e12038c2b82d6eb1b35b6224d9" - integrity sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk= - -d3-array@1, d3-array@^1.1.1, d3-array@^1.2.0: - version "1.2.4" - resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-1.2.4.tgz#635ce4d5eea759f6f605863dbcfc30edc737f71f" - integrity sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw== - -d3-axis@1: - version "1.0.12" - resolved "https://registry.yarnpkg.com/d3-axis/-/d3-axis-1.0.12.tgz#cdf20ba210cfbb43795af33756886fb3638daac9" - integrity sha512-ejINPfPSNdGFKEOAtnBtdkpr24c4d4jsei6Lg98mxf424ivoDP2956/5HDpIAtmHo85lqT4pruy+zEgvRUBqaQ== - -d3-brush@1: - version "1.1.5" - resolved "https://registry.yarnpkg.com/d3-brush/-/d3-brush-1.1.5.tgz#066b8e84d17b192986030446c97c0fba7e1bacdc" - integrity sha512-rEaJ5gHlgLxXugWjIkolTA0OyMvw8UWU1imYXy1v642XyyswmI1ybKOv05Ft+ewq+TFmdliD3VuK0pRp1VT/5A== - dependencies: - d3-dispatch "1" - d3-drag "1" - d3-interpolate "1" - d3-selection "1" - d3-transition "1" - -d3-chord@1: - version "1.0.6" - resolved "https://registry.yarnpkg.com/d3-chord/-/d3-chord-1.0.6.tgz#309157e3f2db2c752f0280fedd35f2067ccbb15f" - integrity sha512-JXA2Dro1Fxw9rJe33Uv+Ckr5IrAa74TlfDEhE/jfLOaXegMQFQTAgAw9WnZL8+HxVBRXaRGCkrNU7pJeylRIuA== - dependencies: - d3-array "1" - d3-path "1" - -d3-collection@1: - version "1.0.7" - resolved "https://registry.yarnpkg.com/d3-collection/-/d3-collection-1.0.7.tgz#349bd2aa9977db071091c13144d5e4f16b5b310e" - integrity 
sha512-ii0/r5f4sjKNTfh84Di+DpztYwqKhEyUlKoPrzUFfeSkWxjW49xU2QzO9qrPrNkpdI0XJkfzvmTu8V2Zylln6A== - -d3-color@1: - version "1.4.1" - resolved "https://registry.yarnpkg.com/d3-color/-/d3-color-1.4.1.tgz#c52002bf8846ada4424d55d97982fef26eb3bc8a" - integrity sha512-p2sTHSLCJI2QKunbGb7ocOh7DgTAn8IrLx21QRc/BSnodXM4sv6aLQlnfpvehFMLZEfBc6g9pH9SWQccFYfJ9Q== - -d3-contour@1: - version "1.3.2" - resolved "https://registry.yarnpkg.com/d3-contour/-/d3-contour-1.3.2.tgz#652aacd500d2264cb3423cee10db69f6f59bead3" - integrity sha512-hoPp4K/rJCu0ladiH6zmJUEz6+u3lgR+GSm/QdM2BBvDraU39Vr7YdDCicJcxP1z8i9B/2dJLgDC1NcvlF8WCg== - dependencies: - d3-array "^1.1.1" - -d3-dispatch@1: - version "1.0.6" - resolved "https://registry.yarnpkg.com/d3-dispatch/-/d3-dispatch-1.0.6.tgz#00d37bcee4dd8cd97729dd893a0ac29caaba5d58" - integrity sha512-fVjoElzjhCEy+Hbn8KygnmMS7Or0a9sI2UzGwoB7cCtvI1XpVN9GpoYlnb3xt2YV66oXYb1fLJ8GMvP4hdU1RA== - -d3-drag@1: - version "1.2.5" - resolved "https://registry.yarnpkg.com/d3-drag/-/d3-drag-1.2.5.tgz#2537f451acd39d31406677b7dc77c82f7d988f70" - integrity sha512-rD1ohlkKQwMZYkQlYVCrSFxsWPzI97+W+PaEIBNTMxRuxz9RF0Hi5nJWHGVJ3Om9d2fRTe1yOBINJyy/ahV95w== - dependencies: - d3-dispatch "1" - d3-selection "1" - -d3-dsv@1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/d3-dsv/-/d3-dsv-1.2.0.tgz#9d5f75c3a5f8abd611f74d3f5847b0d4338b885c" - integrity sha512-9yVlqvZcSOMhCYzniHE7EVUws7Fa1zgw+/EAV2BxJoG3ME19V6BQFBwI855XQDsxyOuG7NibqRMTtiF/Qup46g== - dependencies: - commander "2" - iconv-lite "0.4" - rw "1" - -d3-ease@1: - version "1.0.6" - resolved "https://registry.yarnpkg.com/d3-ease/-/d3-ease-1.0.6.tgz#ebdb6da22dfac0a22222f2d4da06f66c416a0ec0" - integrity sha512-SZ/lVU7LRXafqp7XtIcBdxnWl8yyLpgOmzAk0mWBI9gXNzLDx5ybZgnRbH9dN/yY5tzVBqCQ9avltSnqVwessQ== - -d3-fetch@1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/d3-fetch/-/d3-fetch-1.1.2.tgz#957c8fbc6d4480599ba191b1b2518bf86b3e1be2" - integrity 
sha512-S2loaQCV/ZeyTyIF2oP8D1K9Z4QizUzW7cWeAOAS4U88qOt3Ucf6GsmgthuYSdyB2HyEm4CeGvkQxWsmInsIVA== - dependencies: - d3-dsv "1" - -d3-force@1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/d3-force/-/d3-force-1.2.1.tgz#fd29a5d1ff181c9e7f0669e4bd72bdb0e914ec0b" - integrity sha512-HHvehyaiUlVo5CxBJ0yF/xny4xoaxFxDnBXNvNcfW9adORGZfyNF1dj6DGLKyk4Yh3brP/1h3rnDzdIAwL08zg== - dependencies: - d3-collection "1" - d3-dispatch "1" - d3-quadtree "1" - d3-timer "1" - -d3-format@1: - version "1.4.4" - resolved "https://registry.yarnpkg.com/d3-format/-/d3-format-1.4.4.tgz#356925f28d0fd7c7983bfad593726fce46844030" - integrity sha512-TWks25e7t8/cqctxCmxpUuzZN11QxIA7YrMbram94zMQ0PXjE4LVIMe/f6a4+xxL8HQ3OsAFULOINQi1pE62Aw== - -d3-geo@1: - version "1.12.0" - resolved "https://registry.yarnpkg.com/d3-geo/-/d3-geo-1.12.0.tgz#58ddbdf4d9db5f199db69d1b7c93dca6454a6f24" - integrity sha512-NalZVW+6/SpbKcnl+BCO67m8gX+nGeJdo6oGL9H6BRUGUL1e+AtPcP4vE4TwCQ/gl8y5KE7QvBzrLn+HsKIl+w== - dependencies: - d3-array "1" - -d3-hierarchy@1: - version "1.1.9" - resolved "https://registry.yarnpkg.com/d3-hierarchy/-/d3-hierarchy-1.1.9.tgz#2f6bee24caaea43f8dc37545fa01628559647a83" - integrity sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ== - -d3-interpolate@1: - version "1.4.0" - resolved "https://registry.yarnpkg.com/d3-interpolate/-/d3-interpolate-1.4.0.tgz#526e79e2d80daa383f9e0c1c1c7dcc0f0583e987" - integrity sha512-V9znK0zc3jOPV4VD2zZn0sDhZU3WAE2bmlxdIwwQPPzPjvyLkd8B3JUVdS1IDUFDkWZ72c9qnv1GK2ZagTZ8EA== - dependencies: - d3-color "1" - -d3-path@1: - version "1.0.9" - resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-1.0.9.tgz#48c050bb1fe8c262493a8caf5524e3e9591701cf" - integrity sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg== - -d3-polygon@1: - version "1.0.6" - resolved "https://registry.yarnpkg.com/d3-polygon/-/d3-polygon-1.0.6.tgz#0bf8cb8180a6dc107f518ddf7975e12abbfbd38e" - integrity 
sha512-k+RF7WvI08PC8reEoXa/w2nSg5AUMTi+peBD9cmFc+0ixHfbs4QmxxkarVal1IkVkgxVuk9JSHhJURHiyHKAuQ== - -d3-quadtree@1: - version "1.0.7" - resolved "https://registry.yarnpkg.com/d3-quadtree/-/d3-quadtree-1.0.7.tgz#ca8b84df7bb53763fe3c2f24bd435137f4e53135" - integrity sha512-RKPAeXnkC59IDGD0Wu5mANy0Q2V28L+fNe65pOCXVdVuTJS3WPKaJlFHer32Rbh9gIo9qMuJXio8ra4+YmIymA== - -d3-random@1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/d3-random/-/d3-random-1.1.2.tgz#2833be7c124360bf9e2d3fd4f33847cfe6cab291" - integrity sha512-6AK5BNpIFqP+cx/sreKzNjWbwZQCSUatxq+pPRmFIQaWuoD+NrbVWw7YWpHiXpCQ/NanKdtGDuB+VQcZDaEmYQ== - -d3-scale-chromatic@1: - version "1.5.0" - resolved "https://registry.yarnpkg.com/d3-scale-chromatic/-/d3-scale-chromatic-1.5.0.tgz#54e333fc78212f439b14641fb55801dd81135a98" - integrity sha512-ACcL46DYImpRFMBcpk9HhtIyC7bTBR4fNOPxwVSl0LfulDAwyiHyPOTqcDG1+t5d4P9W7t/2NAuWu59aKko/cg== - dependencies: - d3-color "1" - d3-interpolate "1" +cssnano-util-get-match@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz#c0e4ca07f5386bb17ec5e52250b4f5961365156d" + integrity sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0= -d3-scale@2: - version "2.2.2" - resolved "https://registry.yarnpkg.com/d3-scale/-/d3-scale-2.2.2.tgz#4e880e0b2745acaaddd3ede26a9e908a9e17b81f" - integrity sha512-LbeEvGgIb8UMcAa0EATLNX0lelKWGYDQiPdHj+gLblGVhGLyNbaCn3EvrJf0A3Y/uOOU5aD6MTh5ZFCdEwGiCw== +cssnano-util-raw-cache@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz#b26d5fd5f72a11dfe7a7846fb4c67260f96bf282" + integrity sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA== dependencies: - d3-array "^1.2.0" - d3-collection "1" - d3-format "1" - d3-interpolate "1" - d3-time "1" - d3-time-format "2" + postcss "^7.0.0" -d3-selection@1, d3-selection@^1.1.0: - version "1.4.1" - resolved 
"https://registry.yarnpkg.com/d3-selection/-/d3-selection-1.4.1.tgz#98eedbbe085fbda5bafa2f9e3f3a2f4d7d622a98" - integrity sha512-BTIbRjv/m5rcVTfBs4AMBLKs4x8XaaLkwm28KWu9S2vKNqXkXt2AH2Qf0sdPZHjFxcWg/YL53zcqAz+3g4/7PA== +cssnano-util-same-parent@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz#574082fb2859d2db433855835d9a8456ea18bbf3" + integrity sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q== -d3-shape@1: - version "1.3.7" - resolved "https://registry.yarnpkg.com/d3-shape/-/d3-shape-1.3.7.tgz#df63801be07bc986bc54f63789b4fe502992b5d7" - integrity sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw== +cssnano@4.1.10: + version "4.1.10" + resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-4.1.10.tgz#0ac41f0b13d13d465487e111b778d42da631b8b2" + integrity sha512-5wny+F6H4/8RgNlaqab4ktc3e0/blKutmq8yNlBFXA//nSFFAqAngjNVRzUvCgYROULmZZUoosL/KSoZo5aUaQ== dependencies: - d3-path "1" + cosmiconfig "^5.0.0" + cssnano-preset-default "^4.0.7" + is-resolvable "^1.0.0" + postcss "^7.0.0" -d3-time-format@2: - version "2.2.3" - resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-2.2.3.tgz#0c9a12ee28342b2037e5ea1cf0b9eb4dd75f29cb" - integrity sha512-RAHNnD8+XvC4Zc4d2A56Uw0yJoM7bsvOlJR33bclxq399Rak/b9bhvu/InjxdWhPtkgU53JJcleJTGkNRnN6IA== +csso@^4.0.2: + version "4.0.3" + resolved "https://registry.yarnpkg.com/csso/-/csso-4.0.3.tgz#0d9985dc852c7cc2b2cacfbbe1079014d1a8e903" + integrity sha512-NL3spysxUkcrOgnpsT4Xdl2aiEiBG6bXswAABQVHcMrfjjBisFOKwLDOmf4wf32aPdcJws1zds2B0Rg+jqMyHQ== dependencies: - d3-time "1" - -d3-time@1: - version "1.1.0" - resolved "https://registry.yarnpkg.com/d3-time/-/d3-time-1.1.0.tgz#b1e19d307dae9c900b7e5b25ffc5dcc249a8a0f1" - integrity sha512-Xh0isrZ5rPYYdqhAVk8VLnMEidhz5aP7htAADH6MfzgmmicPkTo8LhkLxci61/lCB7n7UmE3bN0leRt+qvkLxA== + css-tree "1.0.0-alpha.39" -d3-timer@1: - 
version "1.0.10" - resolved "https://registry.yarnpkg.com/d3-timer/-/d3-timer-1.0.10.tgz#dfe76b8a91748831b13b6d9c793ffbd508dd9de5" - integrity sha512-B1JDm0XDaQC+uvo4DT79H0XmBskgS3l6Ve+1SBCfxgmtIb1AVrPIoqd+nPSv+loMX8szQ0sVUhGngL7D5QPiXw== +csstype@^2.2.0, csstype@^2.5.7: + version "2.6.10" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.10.tgz#e63af50e66d7c266edb6b32909cfd0aabe03928b" + integrity sha512-D34BqZU4cIlMCY93rZHbrq9pjTAQJ3U8S8rfBqjwHxkGPThWFjzZDQpgMJY0QViLxth6ZKYiwFBo14RdN44U/w== -d3-transition@1: - version "1.3.2" - resolved "https://registry.yarnpkg.com/d3-transition/-/d3-transition-1.3.2.tgz#a98ef2151be8d8600543434c1ca80140ae23b398" - integrity sha512-sc0gRU4PFqZ47lPVHloMn9tlPcv8jxgOQg+0zjhfZXMQuvppjG6YuwdMBE0TuqCZjeJkLecku/l9R0JPcRhaDA== +currently-unhandled@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/currently-unhandled/-/currently-unhandled-0.4.1.tgz#988df33feab191ef799a61369dd76c17adf957ea" + integrity sha1-mI3zP+qxke95mmE2nddsF635V+o= dependencies: - d3-color "1" - d3-dispatch "1" - d3-ease "1" - d3-interpolate "1" - d3-selection "^1.1.0" - d3-timer "1" + array-find-index "^1.0.1" -d3-voronoi@1: - version "1.1.4" - resolved "https://registry.yarnpkg.com/d3-voronoi/-/d3-voronoi-1.1.4.tgz#dd3c78d7653d2bb359284ae478645d95944c8297" - integrity sha512-dArJ32hchFsrQ8uMiTBLq256MpnZjeuBtdHpaDlYuQyjU0CVzCJl/BVW+SkszaAeH95D/8gxqAhgx0ouAWAfRg== - -d3-zoom@1: - version "1.8.3" - resolved "https://registry.yarnpkg.com/d3-zoom/-/d3-zoom-1.8.3.tgz#b6a3dbe738c7763121cd05b8a7795ffe17f4fc0a" - integrity sha512-VoLXTK4wvy1a0JpH2Il+F2CiOhVu7VRXWF5M/LroMIh3/zBAC3WAt7QoIvPibOavVo20hN6/37vwAsdBejLyKQ== - dependencies: - d3-dispatch "1" - d3-drag "1" - d3-interpolate "1" - d3-selection "1" - d3-transition "1" - -d3@^5.14: - version "5.16.0" - resolved "https://registry.yarnpkg.com/d3/-/d3-5.16.0.tgz#9c5e8d3b56403c79d4ed42fbd62f6113f199c877" - integrity 
sha512-4PL5hHaHwX4m7Zr1UapXW23apo6pexCgdetdJ5kTmADpG/7T9Gkxw0M0tf/pjoB63ezCCm0u5UaFYy2aMt0Mcw== - dependencies: - d3-array "1" - d3-axis "1" - d3-brush "1" - d3-chord "1" - d3-collection "1" - d3-color "1" - d3-contour "1" - d3-dispatch "1" - d3-drag "1" - d3-dsv "1" - d3-ease "1" - d3-fetch "1" - d3-force "1" - d3-format "1" - d3-geo "1" - d3-hierarchy "1" - d3-interpolate "1" - d3-path "1" - d3-polygon "1" - d3-quadtree "1" - d3-random "1" - d3-scale "2" - d3-scale-chromatic "1" - d3-selection "1" - d3-shape "1" - d3-time "1" - d3-time-format "2" - d3-timer "1" - d3-transition "1" - d3-voronoi "1" - d3-zoom "1" +cyclist@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-1.0.1.tgz#596e9698fd0c80e12038c2b82d6eb1b35b6224d9" + integrity sha1-WW6WmP0MgOEgOMK4LW6xs1tiJNk= d@1, d@^1.0.1: version "1.0.1" @@ -4913,24 +4638,6 @@ d@1, d@^1.0.1: es5-ext "^0.10.50" type "^1.0.1" -dagre-d3@0.6.4: - version "0.6.4" - resolved "https://registry.yarnpkg.com/dagre-d3/-/dagre-d3-0.6.4.tgz#0728d5ce7f177ca2337df141ceb60fbe6eeb7b29" - integrity sha512-e/6jXeCP7/ptlAM48clmX4xTZc5Ek6T6kagS7Oz2HrYSdqcLZFLqpAfh7ldbZRFfxCZVyh61NEPR08UQRVxJzQ== - dependencies: - d3 "^5.14" - dagre "^0.8.5" - graphlib "^2.1.8" - lodash "^4.17.15" - -dagre@^0.8.5: - version "0.8.5" - resolved "https://registry.yarnpkg.com/dagre/-/dagre-0.8.5.tgz#ba30b0055dac12b6c1fcc247817442777d06afee" - integrity sha512-/aTqmnRta7x7MCCpExk7HQL2O4owCT2h8NT//9I1OQ9vt29Pa0BzSAkR5lwFUcQ7491yVi/3CXU9jQ5o0Mn2Sw== - dependencies: - graphlib "^2.1.8" - lodash "^4.17.15" - dargs@^4.0.1: version "4.1.0" resolved "https://registry.yarnpkg.com/dargs/-/dargs-4.1.0.tgz#03a9dbb4b5c2f139bf14ae53f0b8a2a6a86f4e17" @@ -4950,6 +4657,13 @@ data-uri-to-buffer@1: resolved "https://registry.yarnpkg.com/data-uri-to-buffer/-/data-uri-to-buffer-1.2.0.tgz#77163ea9c20d8641b4707e8f18abdf9a78f34835" integrity sha512-vKQ9DTQPN1FLYiiEEOQ6IBGFqvjCa5rSK3cWMy/Nespm5d/x3dGFT9UBZnkLxCwua/IXBi2TYnwTEpsOvhC4UQ== 
+data-uri-to-buffer@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/data-uri-to-buffer/-/data-uri-to-buffer-3.0.0.tgz#8a3088a5efd3f53c3682343313c6895d498eb8d7" + integrity sha512-MJ6mFTZ+nPQO+39ua/ltwNePXrfdF3Ww0wP1Od7EePySXN1cP9XNqRQOG3FxTfipp8jx898LUCgBCEP11Qw/ZQ== + dependencies: + buffer-from "^1.1.1" + dateformat@^3.0.0: version "3.0.3" resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-3.0.3.tgz#a6e37499a4d9a9cf85ef5872044d62901c9889ae" @@ -5028,7 +4742,7 @@ deep-extend@^0.6.0: resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== -deep-is@~0.1.3: +deep-is@^0.1.3, deep-is@~0.1.3: version "0.1.3" resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= @@ -5209,10 +4923,10 @@ domhandler@3.0.0, domhandler@^3.0.0: dependencies: domelementtype "^2.0.1" -domutils@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.0.0.tgz#15b8278e37bfa8468d157478c58c367718133c08" - integrity sha512-n5SelJ1axbO636c2yUtOGia/IcJtVtlhQbFiVDBZHKV5ReJO1ViX7sFEemtuyoAnBxk5meNSYgA8V4s0271efg== +domutils@2.1.0, domutils@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.1.0.tgz#7ade3201af43703fde154952e3a868eb4b635f16" + integrity sha512-CD9M0Dm1iaHfQ1R/TI+z3/JWp/pgub0j4jIQKH89ARR4ATAV2nbaOQS5XxU9maJP5jHaPdDDQSEHuE2UmpUTKg== dependencies: dom-serializer "^0.2.1" domelementtype "^2.0.1" @@ -5226,15 +4940,6 @@ domutils@^1.7.0: dom-serializer "0" domelementtype "1" -domutils@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.1.0.tgz#7ade3201af43703fde154952e3a868eb4b635f16" - integrity sha512-CD9M0Dm1iaHfQ1R/TI+z3/JWp/pgub0j4jIQKH89ARR4ATAV2nbaOQS5XxU9maJP5jHaPdDDQSEHuE2UmpUTKg== - 
dependencies: - dom-serializer "^0.2.1" - domelementtype "^2.0.1" - domhandler "^3.0.0" - dot-prop@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-3.0.0.tgz#1b708af094a49c9a0e7dbcad790aba539dac1177" @@ -5292,22 +4997,22 @@ echarts-gl@1.1.1: claygl "^1.2.1" zrender "^4.0.4" -echarts@4.7.0: - version "4.7.0" - resolved "https://registry.yarnpkg.com/echarts/-/echarts-4.7.0.tgz#5b3875a4c2f91e3929425fabab9eace7e4098b3f" - integrity sha512-NlOTdUcAsIyCCG+N4uh0ZEvXtrPW2jvcuqf03RyqYeCKzyPbiOQ4I3MdKXMhxG3lBdqQNdNXVT71SB4KTQjN0A== +echarts@4.8.0: + version "4.8.0" + resolved "https://registry.yarnpkg.com/echarts/-/echarts-4.8.0.tgz#b2c1cfb9229b13d368ee104fc8eea600b574d4c4" + integrity sha512-YwShpug8fWngj/RlgxDaYrLBoD+LsZUArrusjNPHpAF+is+gGe38xx4W848AwWMGoi745t3OXM52JedNrv+F6g== dependencies: - zrender "4.3.0" + zrender "4.3.1" ee-first@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= -electron-to-chromium@^1.3.322, electron-to-chromium@^1.3.413: - version "1.3.438" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.438.tgz#56051a9b148842fec813b113e8070ae892a85920" - integrity sha512-QKMcpfA/fCOnqFHsZvKr2haQQb3eXkDI17zT+4hHxJJThyN5nShcG6q1VR8vRiE/2GCJM+0p3PzinYknkdsBYg== +electron-to-chromium@^1.3.413: + version "1.3.451" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.451.tgz#0c075af3e2f06d706670bde0279432802ca8c83f" + integrity sha512-2fvco0F2bBIgqzO8GRP0Jt/91pdrf9KfZ5FsmkYkjERmIJG585cFeFZV4+CO6oTmU3HmCTgfcZuEa7kW8VUh3A== elegant-spinner@^2.0.0: version "2.0.0" @@ -5391,7 +5096,7 @@ enhanced-resolve@^4.0.0, enhanced-resolve@^4.1.0: memory-fs "^0.5.0" tapable "^1.0.0" -enquirer@2.3.5, enquirer@^2.3.4: +enquirer@2.3.5, enquirer@^2.3.5: version "2.3.5" resolved 
"https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.5.tgz#3ab2b838df0a9d8ab9e7dff235b0e8712ef92381" integrity sha512-BNT1C08P9XD0vNg3J475yIUG+mVdp9T6towYFHUv897X0KoHBjB1shyrNmhmtHWKP17iSWgo7Gqh7BBuzLZMSA== @@ -5547,10 +5252,10 @@ eslint-plugin-react-hooks@4.0.0: resolved "https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.0.0.tgz#81196b990043cde339e25c6662aeebe32ac52d01" integrity sha512-YKBY+kilK5wrwIdQnCF395Ya6nDro3EAMoe+2xFkmyklyhF16fH83TrQOo9zbZIDxBsXFgBbywta/0JKRNFDkw== -eslint-plugin-react@7.19.0: - version "7.19.0" - resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.19.0.tgz#6d08f9673628aa69c5559d33489e855d83551666" - integrity sha512-SPT8j72CGuAP+JFbT0sJHOB80TX/pu44gQ4vXH/cq+hQTiY2PuZ6IHkqXJV6x1b28GDdo1lbInjKUrrdUf0LOQ== +eslint-plugin-react@7.20.0: + version "7.20.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.20.0.tgz#f98712f0a5e57dfd3e5542ef0604b8739cd47be3" + integrity sha512-rqe1abd0vxMjmbPngo4NaYxTcR3Y4Hrmc/jg4T+sYz63yqlmJRknpEQfmWY+eDWPuMmix6iUIK+mv0zExjeLgA== dependencies: array-includes "^3.1.1" doctrine "^2.1.0" @@ -5561,7 +5266,6 @@ eslint-plugin-react@7.19.0: object.values "^1.1.1" prop-types "^15.7.2" resolve "^1.15.1" - semver "^6.3.0" string.prototype.matchall "^4.0.2" xregexp "^4.3.0" @@ -5581,13 +5285,6 @@ eslint-scope@^5.0.0: esrecurse "^4.1.0" estraverse "^4.1.1" -eslint-utils@^1.4.3: - version "1.4.3" - resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-1.4.3.tgz#74fec7c54d0776b6f67e0251040b5806564e981f" - integrity sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q== - dependencies: - eslint-visitor-keys "^1.1.0" - eslint-utils@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-2.0.0.tgz#7be1cc70f27a72a76cd14aa698bcabed6890e1cd" @@ -5600,22 +5297,22 @@ eslint-visitor-keys@^1.1.0: resolved 
"https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz#e2a82cea84ff246ad6fb57f9bde5b46621459ec2" integrity sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A== -eslint@6.8.0: - version "6.8.0" - resolved "https://registry.yarnpkg.com/eslint/-/eslint-6.8.0.tgz#62262d6729739f9275723824302fb227c8c93ffb" - integrity sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig== +eslint@7.1.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-7.1.0.tgz#d9a1df25e5b7859b0a3d86bb05f0940ab676a851" + integrity sha512-DfS3b8iHMK5z/YLSme8K5cge168I8j8o1uiVmFCgnnjxZQbCGyraF8bMl7Ju4yfBmCuxD7shOF7eqGkcuIHfsA== dependencies: "@babel/code-frame" "^7.0.0" ajv "^6.10.0" - chalk "^2.1.0" - cross-spawn "^6.0.5" + chalk "^4.0.0" + cross-spawn "^7.0.2" debug "^4.0.1" doctrine "^3.0.0" eslint-scope "^5.0.0" - eslint-utils "^1.4.3" + eslint-utils "^2.0.0" eslint-visitor-keys "^1.1.0" - espree "^6.1.2" - esquery "^1.0.1" + espree "^7.0.0" + esquery "^1.2.0" esutils "^2.0.2" file-entry-cache "^5.0.1" functional-red-black-tree "^1.0.1" @@ -5628,25 +5325,24 @@ eslint@6.8.0: is-glob "^4.0.0" js-yaml "^3.13.1" json-stable-stringify-without-jsonify "^1.0.1" - levn "^0.3.0" + levn "^0.4.1" lodash "^4.17.14" minimatch "^3.0.4" - mkdirp "^0.5.1" natural-compare "^1.4.0" - optionator "^0.8.3" + optionator "^0.9.1" progress "^2.0.0" - regexpp "^2.0.1" - semver "^6.1.2" - strip-ansi "^5.2.0" - strip-json-comments "^3.0.1" + regexpp "^3.1.0" + semver "^7.2.1" + strip-ansi "^6.0.0" + strip-json-comments "^3.1.0" table "^5.2.3" text-table "^0.2.0" v8-compile-cache "^2.0.3" -espree@^6.1.2: - version "6.2.1" - resolved "https://registry.yarnpkg.com/espree/-/espree-6.2.1.tgz#77fc72e1fd744a2052c20f38a5b575832e82734a" - integrity sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw== +espree@^7.0.0: + version "7.0.0" + resolved 
"https://registry.yarnpkg.com/espree/-/espree-7.0.0.tgz#8a7a60f218e69f120a842dc24c5a88aa7748a74e" + integrity sha512-/r2XEx5Mw4pgKdyb7GNLQNsu++asx/dltf/CI8RFi9oGHxmQFgvLbc5Op4U6i8Oaj+kdslhJtVlEZeAqH5qOTw== dependencies: acorn "^7.1.1" acorn-jsx "^5.2.0" @@ -5662,7 +5358,7 @@ esprima@^4.0.0, esprima@^4.0.1: resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== -esquery@^1.0.1: +esquery@^1.2.0: version "1.3.1" resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.3.1.tgz#b78b5828aa8e214e29fb74c4d5b752e1c033da57" integrity sha512-olpvt9QG0vniUBZspVRN6lwB7hOZoTRtT+jzR+tS4ffYx2mzbw+z0XCOk44aaLYKApNX5nMm+E+P6o25ip/DHQ== @@ -5702,30 +5398,25 @@ eventemitter2@5.0.1, eventemitter2@^5.0.1, eventemitter2@~5.0.1: integrity sha1-YZegldX7a1folC9v1+qtY6CclFI= eventemitter2@^6.3.1: - version "6.4.0" - resolved "https://registry.yarnpkg.com/eventemitter2/-/eventemitter2-6.4.0.tgz#458afe0f570a395da622efe39c0cdccdf227cac9" - integrity sha512-UpQvRvZmP0qYxXrTl0620GOmvYlB77fHLptCG7ha79ptM2J+Q9nNurVz+gkHEiSZKTnDX5xHnKIHDCua1hsDjw== + version "6.4.1" + resolved "https://registry.yarnpkg.com/eventemitter2/-/eventemitter2-6.4.1.tgz#7fb2dd95f9f7c3c615e848d8a91a18a65feb51b3" + integrity sha512-x4TgZ3j6tfLGzAJ4MD7M7qTdAiDlfKdssgFENVpW0csJKdlXTpE27SWUDEDb5eNS/HBWQtiDFPKiO6oHokYDqg== eventemitter2@~0.4.14: version "0.4.14" resolved "https://registry.yarnpkg.com/eventemitter2/-/eventemitter2-0.4.14.tgz#8f61b75cde012b2e9eb284d4545583b5643b61ab" integrity sha1-j2G3XN4BKy6esoTUVFWDtWQ7Yas= -eventemitter3@4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.0.tgz#d65176163887ee59f386d64c82610b696a4a74eb" - integrity sha512-qerSRB0p+UDEssxTtm6EDKcE7W4OaoisfIMl4CngyEhjpYglocpNg6UEqCvemdGhosAsg4sO2dXJOdyBifPGCg== +eventemitter3@4.0.4, eventemitter3@^4.0.0: + version "4.0.4" + resolved 
"https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.4.tgz#b5463ace635a083d018bdc7c917b4c5f10a85384" + integrity sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ== eventemitter3@^3.1.0: version "3.1.2" resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-3.1.2.tgz#2d3d48f9c346698fce83a85d7d664e98535df6e7" integrity sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q== -eventemitter3@^4.0.0: - version "4.0.4" - resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.4.tgz#b5463ace635a083d018bdc7c917b4c5f10a85384" - integrity sha512-rlaVLnVxtxvoyLsQQFBx53YmXHDxRIzzTLbdfxqi4yocpSjAxXwkU0cScM5JgSKMqEhrZpnvQ2D9gjylR0AimQ== - events@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/events/-/events-3.1.0.tgz#84279af1b34cb75aa88bf5ff291f6d0bd9b31a59" @@ -5752,10 +5443,10 @@ execa@^1.0.0: signal-exit "^3.0.0" strip-eof "^1.0.0" -execa@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/execa/-/execa-4.0.1.tgz#988488781f1f0238cd156f7aaede11c3e853b4c1" - integrity sha512-SCjM/zlBdOK8Q5TIjOn6iEHZaPHFsMoTxXQ2nvUvtPnuohz3H2dIozSg+etNR98dGoYUp2ENSKLL/XaMmbxVgw== +execa@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/execa/-/execa-4.0.2.tgz#ad87fb7b2d9d564f70d2b62d511bee41d5cbb240" + integrity sha512-QI2zLa6CjGWdiQsmSkZoGtDx2N+cQIGb3yNolGTdjSQzydzLgYYf8LRuagp7S7fPimjcrzUDSUFd/MgzELMi4Q== dependencies: cross-spawn "^7.0.0" get-stream "^5.0.0" @@ -5920,7 +5611,7 @@ fast-json-stable-stringify@^2.0.0: resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== -fast-levenshtein@~2.0.6: +fast-levenshtein@^2.0.6, fast-levenshtein@~2.0.6: version "2.0.6" resolved 
"https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= @@ -6523,13 +6214,6 @@ graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6 resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.4.tgz#2256bde14d3632958c465ebc96dc467ca07a29fb" integrity sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw== -graphlib@^2.1.8: - version "2.1.8" - resolved "https://registry.yarnpkg.com/graphlib/-/graphlib-2.1.8.tgz#5761d414737870084c92ec7b5dbcb0592c9d35da" - integrity sha512-jcLLfkpoVGmH7/InMC/1hIvOPSUh38oJtGhvrOFGzioE1DZ+0YW16RgmOJhHiuWTvGiJQ9Z1Ik43JvkRPRvE+A== - dependencies: - lodash "^4.17.15" - growl@1.10.5: version "1.10.5" resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.5.tgz#f2735dc2283674fa67478b10181059355c369e5e" @@ -6775,21 +6459,21 @@ http-proxy-agent@^2.1.0: agent-base "4" debug "3.1.0" -http-proxy-middleware@1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-1.0.3.tgz#f73daad8dac622d51fe1769960c914b9b1f75a72" - integrity sha512-GHvPeBD+A357zS5tHjzj6ISrVOjjCiy0I92bdyTJz0pNmIjFxO0NX/bX+xkGgnclKQE/5hHAB9JEQ7u9Pw4olg== +http-proxy-middleware@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-1.0.4.tgz#425ea177986a0cda34f9c81ec961c719adb6c2a9" + integrity sha512-8wiqujNWlsZNbeTSSWMLUl/u70xbJ5VYRwPR8RcAbvsNxzAZbgwLzRvT96btbm3fAitZUmo5i8LY6WKGyHDgvA== dependencies: - "@types/http-proxy" "^1.17.3" - http-proxy "^1.18.0" + "@types/http-proxy" "^1.17.4" + http-proxy "^1.18.1" is-glob "^4.0.1" lodash "^4.17.15" micromatch "^4.0.2" -http-proxy@^1.18.0: - version "1.18.0" - resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.0.tgz#dbe55f63e75a347db7f3d99974f2692a314a6a3a" - integrity 
sha512-84I2iJM/n1d4Hdgc6y2+qY5mDaz2PUVjlg9znE9byl+q0uC3DeByqBGReQu5tpLK0TAqTIXScRUV+dg7+bUPpQ== +http-proxy@^1.18.1: + version "1.18.1" + resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.18.1.tgz#401541f0534884bbf95260334e72f88ee3976549" + integrity sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ== dependencies: eventemitter3 "^4.0.0" follow-redirects "^1.0.0" @@ -6853,38 +6537,38 @@ husky@4.2.5: slash "^3.0.0" which-pm-runs "^1.0.0" -i18next-browser-languagedetector@4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/i18next-browser-languagedetector/-/i18next-browser-languagedetector-4.1.1.tgz#89656cd9b78bb92dc0c7e86c0d9606f3f15fabfa" - integrity sha512-akv0zurR/2KU7s1qaWkirY9FEEOT1TNsQaezEg8+1BLLQre7vylqb7tYoUgYqP/0/BEzXJgnoQnj+sh5xYFMhg== +i18next-browser-languagedetector@4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/i18next-browser-languagedetector/-/i18next-browser-languagedetector-4.2.0.tgz#82e35d31f88a1d7c2b6d5913bf8c8481cd40aafb" + integrity sha512-qRSCBWgDUSqVQb3sTxkDC+ImYLhF+wB387Y1RpOcJvyex+V3abi+W83n4Awy+dx719AOBbKTy97FjrUGrAhbyw== dependencies: "@babel/runtime" "^7.5.5" -i18next-fs-backend@1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/i18next-fs-backend/-/i18next-fs-backend-1.0.2.tgz#8f137a59a7088c97d5112d6d624ebb8c5675fac9" - integrity sha512-7N7V6gu00/UBdCjz37JFKGB5UfuLDJlIoLMAxXQG5ih3CFKPqtJ7GfQ7lqd8t/L/6EQ1sciZ9022Xy5BwTrf7g== +i18next-fs-backend@1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/i18next-fs-backend/-/i18next-fs-backend-1.0.4.tgz#527284da794a8896969ccfd98dba968b02bbdf0c" + integrity sha512-U5NhyDsdJnxRhvyjLjF03Yu9xgaI2Vw6+77yJTrLPiNTRAYSAslA3AS1fOrTRZTyKED1ylZGRvvIGm694N+/xg== -i18next-http-backend@1.0.8: - version "1.0.8" - resolved "https://registry.yarnpkg.com/i18next-http-backend/-/i18next-http-backend-1.0.8.tgz#6d32c4baacf363a26dfef76a973bfc3dd8d9b25d" - integrity 
sha512-f9oWlt3AuMh+xuKVpG8qlP6azI40N+wRBK/2jaJ7tHk6vMJSAGtZSM76Zx7p2JULqj27yNnc2yOEN4moq3RuFg== +i18next-http-backend@1.0.15: + version "1.0.15" + resolved "https://registry.yarnpkg.com/i18next-http-backend/-/i18next-http-backend-1.0.15.tgz#e1be47f29ec211b2064169c497b8dc55aab66924" + integrity sha512-AOGNcB47n0S4GANyVhGUeLRzUUgvh6Lf5vNs/+G3cCP2Mri0OseO2rX0VLHE6PcL21mswW7ka1nmjGKviXKnjQ== dependencies: node-fetch "2.6.0" -i18next-http-middleware@1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/i18next-http-middleware/-/i18next-http-middleware-1.0.4.tgz#80d4edc8343e9db645303ef878a9ea711aaaf593" - integrity sha512-ioH4LdcCeY6b5pIJppiw86ZjYnV2SnotZkDbGrLybnbc7qTBru4EnXXgMc7bWBjnIfhslW5usHddn6nojeIphw== +i18next-http-middleware@2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/i18next-http-middleware/-/i18next-http-middleware-2.1.0.tgz#0396971fe6d9fcf82c109f6ee72268d89776d7d1" + integrity sha512-OgtOFDjsIL9R4eXWrpge9TCMc52L2kL1d/9HsErPA14DyqfHc7NVCampFIuqTfpmc4janwcG66r5r5p9zdrS+Q== -i18next@19.4.4: - version "19.4.4" - resolved "https://registry.yarnpkg.com/i18next/-/i18next-19.4.4.tgz#c0a18bc2f2be554da636e67bfbf5200c7948b60d" - integrity sha512-ofaHtdsDdX3A5nYur1HWblB7J4hIcjr2ACdnwTAJgc8hTfPbyzZfGX0hVkKpI3vzDIgO6Uzc4v1ffW2W6gG6zw== +i18next@19.4.5: + version "19.4.5" + resolved "https://registry.yarnpkg.com/i18next/-/i18next-19.4.5.tgz#f9ea8bbb48d1ec66bc3436f0bb74a16b11821e11" + integrity sha512-aLvSsURoupi3x9IndmV6+m3IGhzLzhYv7Gw+//K3ovdliyGcFRV0I1MuddI0Bk/zR7BG1U+kJOjeHFUcUIdEgg== dependencies: "@babel/runtime" "^7.3.1" -iconv-lite@0.4, iconv-lite@0.4.24, iconv-lite@^0.4.24, iconv-lite@^0.4.4, iconv-lite@~0.4.13: +iconv-lite@0.4.24, iconv-lite@^0.4.24, iconv-lite@^0.4.4, iconv-lite@~0.4.13: version "0.4.24" resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== @@ 
-7080,7 +6764,7 @@ interpret@1.2.0, interpret@^1.0.0: resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.2.0.tgz#d5061a6224be58e8083985f5014d844359576296" integrity sha512-mT34yGKMNceBQUoVn7iCDKDntA7SC6gycMAWzGx1z/CMCTV7b2AAtXlo3nRyHZ1FelRkQbQjprHSYGwzLtkVbw== -invariant@^2.2.2: +invariant@^2.2.2, invariant@^2.2.4: version "2.2.4" resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== @@ -7488,11 +7172,6 @@ jest-worker@24.9.0: merge-stream "^2.0.0" supports-color "^6.1.0" -js-levenshtein@^1.1.3: - version "1.1.6" - resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d" - integrity sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g== - "js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" @@ -7503,7 +7182,7 @@ js-tokens@^3.0.2: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= -js-yaml@3.13.1, js-yaml@^3.13.1: +js-yaml@3.13.1: version "3.13.1" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== @@ -7511,6 +7190,14 @@ js-yaml@3.13.1, js-yaml@^3.13.1: argparse "^1.0.7" esprima "^4.0.0" +js-yaml@^3.13.1: + version "3.14.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.0.tgz#a7a34170f26a21bb162424d8adacb4113a69e482" + integrity sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + 
jsbn@~0.1.0: version "0.1.1" resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" @@ -7626,7 +7313,7 @@ kind-of@^5.0.0: resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" integrity sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw== -kind-of@^6.0.0, kind-of@^6.0.2: +kind-of@^6.0.0, kind-of@^6.0.2, kind-of@^6.0.3: version "6.0.3" resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== @@ -7650,31 +7337,51 @@ lcid@^2.0.0: dependencies: invert-kv "^2.0.0" -lerna@3.20.2: - version "3.20.2" - resolved "https://registry.yarnpkg.com/lerna/-/lerna-3.20.2.tgz#abf84e73055fe84ee21b46e64baf37b496c24864" - integrity sha512-bjdL7hPLpU3Y8CBnw/1ys3ynQMUjiK6l9iDWnEGwFtDy48Xh5JboR9ZJwmKGCz9A/sarVVIGwf1tlRNKUG9etA== +lerna@3.22.0: + version "3.22.0" + resolved "https://registry.yarnpkg.com/lerna/-/lerna-3.22.0.tgz#da14d08f183ffe6eec566a4ef3f0e11afa621183" + integrity sha512-xWlHdAStcqK/IjKvjsSMHPZjPkBV1lS60PmsIeObU8rLljTepc4Sg/hncw4HWfQxPIewHAUTqhrxPIsqf9L2Eg== dependencies: - "@lerna/add" "3.20.0" - "@lerna/bootstrap" "3.20.0" - "@lerna/changed" "3.20.0" - "@lerna/clean" "3.20.0" + "@lerna/add" "3.21.0" + "@lerna/bootstrap" "3.21.0" + "@lerna/changed" "3.21.0" + "@lerna/clean" "3.21.0" "@lerna/cli" "3.18.5" - "@lerna/create" "3.18.5" - "@lerna/diff" "3.18.5" - "@lerna/exec" "3.20.0" - "@lerna/import" "3.18.5" - "@lerna/info" "3.20.0" - "@lerna/init" "3.18.5" - "@lerna/link" "3.18.5" - "@lerna/list" "3.20.0" - "@lerna/publish" "3.20.2" - "@lerna/run" "3.20.0" - "@lerna/version" "3.20.2" + "@lerna/create" "3.22.0" + "@lerna/diff" "3.21.0" + "@lerna/exec" "3.21.0" + "@lerna/import" "3.22.0" + "@lerna/info" "3.21.0" + "@lerna/init" "3.21.0" + "@lerna/link" "3.21.0" + "@lerna/list" 
"3.21.0" + "@lerna/publish" "3.22.0" + "@lerna/run" "3.21.0" + "@lerna/version" "3.22.0" import-local "^2.0.0" npmlog "^4.1.2" -levn@^0.3.0, levn@~0.3.0: +leven@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" + integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== + +levenary@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/levenary/-/levenary-1.1.1.tgz#842a9ee98d2075aa7faeedbe32679e9205f46f77" + integrity sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ== + dependencies: + leven "^3.1.0" + +levn@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade" + integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== + dependencies: + prelude-ls "^1.2.1" + type-check "~0.4.0" + +levn@~0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= @@ -7687,42 +7394,43 @@ lines-and-columns@^1.1.6: resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00" integrity sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA= -lint-staged@10.2.2: - version "10.2.2" - resolved "https://registry.yarnpkg.com/lint-staged/-/lint-staged-10.2.2.tgz#901403c120eb5d9443a0358b55038b04c8a7db9b" - integrity sha512-78kNqNdDeKrnqWsexAmkOU3Z5wi+1CsQmUmfCuYgMTE8E4rAIX8RHW7xgxwAZ+LAayb7Cca4uYX4P3LlevzjVg== +lint-staged@10.2.8: + version "10.2.8" + resolved "https://registry.yarnpkg.com/lint-staged/-/lint-staged-10.2.8.tgz#fcc76158c9d17f5f6238678dc2f4fe98535b2f68" + integrity sha512-36VUVhZuTJUG0yuSv66o+/Cep9Uwp+od6VkjNxQjHKWvHClVD0SjAZx++4H3zgdr6DxPoUl1y/PVygfPhzAXQQ== dependencies: chalk "^4.0.0" - commander "^5.0.0" + 
cli-truncate "2.1.0" + commander "^5.1.0" cosmiconfig "^6.0.0" debug "^4.1.1" dedent "^0.7.0" - execa "^4.0.0" - listr2 "1.3.8" - log-symbols "^3.0.0" + execa "^4.0.1" + listr2 "^2.0.2" + log-symbols "^4.0.0" micromatch "^4.0.2" normalize-path "^3.0.0" please-upgrade-node "^3.2.0" string-argv "0.3.1" stringify-object "^3.3.0" -listr2@1.3.8: - version "1.3.8" - resolved "https://registry.yarnpkg.com/listr2/-/listr2-1.3.8.tgz#30924d79de1e936d8c40af54b6465cb814a9c828" - integrity sha512-iRDRVTgSDz44tBeBBg/35TQz4W+EZBWsDUq7hPpqeUHm7yLPNll0rkwW3lIX9cPAK7l+x95mGWLpxjqxftNfZA== +listr2@^2.0.2: + version "2.0.4" + resolved "https://registry.yarnpkg.com/listr2/-/listr2-2.0.4.tgz#b39100b0a227ec5659dcf76ddc516211fc168d61" + integrity sha512-oJaAcplPsa72rKW0eg4P4LbEJjhH+UO2I8uqR/I2wzHrVg16ohSfUy0SlcHS21zfYXxtsUpL8YXGHjyfWMR0cg== dependencies: "@samverschueren/stream-to-observable" "^0.3.0" - chalk "^3.0.0" + chalk "^4.0.0" cli-cursor "^3.1.0" cli-truncate "^2.1.0" elegant-spinner "^2.0.0" - enquirer "^2.3.4" + enquirer "^2.3.5" figures "^3.2.0" indent-string "^4.0.0" log-update "^4.0.0" p-map "^4.0.0" pad "^3.2.0" - rxjs "^6.3.3" + rxjs "^6.5.5" through "^2.3.8" uuid "^7.0.2" @@ -7885,6 +7593,13 @@ log-symbols@3.0.0, log-symbols@^3.0.0: dependencies: chalk "^2.4.2" +log-symbols@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.0.0.tgz#69b3cc46d20f448eccdb75ea1fa733d9e821c920" + integrity sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA== + dependencies: + chalk "^4.0.0" + log-update@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/log-update/-/log-update-4.0.0.tgz#589ecd352471f2a1c0c570287543a64dfd20e0a1" @@ -7925,7 +7640,7 @@ lowercase-keys@^2.0.0: resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== 
-lru-cache@^5.1.1: +lru-cache@5.1.1, lru-cache@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== @@ -8252,18 +7967,40 @@ minimist-options@^3.0.1: is-plain-obj "^1.1.0" minimist-options@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/minimist-options/-/minimist-options-4.0.2.tgz#29c4021373ded40d546186725e57761e4b1984a7" - integrity sha512-seq4hpWkYSUh1y7NXxzucwAN9yVlBc3Upgdjz8vLCP97jG8kaOmzYrVH/m7tQ1NYD1wdtZbSLfdy4zFmRWuc/w== + version "4.1.0" + resolved "https://registry.yarnpkg.com/minimist-options/-/minimist-options-4.1.0.tgz#c0655713c53a8a2ebd77ffa247d342c40f010619" + integrity sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A== dependencies: arrify "^1.0.1" is-plain-obj "^1.1.0" + kind-of "^6.0.3" minimist@^1.1.3, minimist@^1.2.0, minimist@^1.2.5: version "1.2.5" resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== +minipass-collect@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/minipass-collect/-/minipass-collect-1.0.2.tgz#22b813bf745dc6edba2576b940022ad6edc8c617" + integrity sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA== + dependencies: + minipass "^3.0.0" + +minipass-flush@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/minipass-flush/-/minipass-flush-1.0.5.tgz#82e7135d7e89a50ffe64610a787953c4c4cbb373" + integrity sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw== + dependencies: + minipass "^3.0.0" + +minipass-pipeline@^1.2.2: + version "1.2.3" + resolved 
"https://registry.yarnpkg.com/minipass-pipeline/-/minipass-pipeline-1.2.3.tgz#55f7839307d74859d6e8ada9c3ebe72cec216a34" + integrity sha512-cFOknTvng5vqnwOpDsZTWhNll6Jf8o2x+/diplafmxpuIymAjzoOolZG0VvQf3V2HgqzJNhnuKHYp2BqDgz8IQ== + dependencies: + minipass "^3.0.0" + minipass@^2.3.5, minipass@^2.6.0, minipass@^2.8.6, minipass@^2.9.0: version "2.9.0" resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.9.0.tgz#e713762e7d3e32fed803115cf93e04bca9fcc9a6" @@ -8272,7 +8009,7 @@ minipass@^2.3.5, minipass@^2.6.0, minipass@^2.8.6, minipass@^2.9.0: safe-buffer "^5.1.2" yallist "^3.0.0" -minipass@^3.0.0: +minipass@^3.0.0, minipass@^3.1.1: version "3.1.3" resolved "https://registry.yarnpkg.com/minipass/-/minipass-3.1.3.tgz#7d42ff1f39635482e15f9cdb53184deebd5815fd" integrity sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg== @@ -8385,16 +8122,16 @@ module-details-from-path@^1.0.3: integrity sha1-EUyUlnPiqKNenTV4hSeqN7Z52is= moment-timezone@^0.5.x: - version "0.5.28" - resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.28.tgz#f093d789d091ed7b055d82aa81a82467f72e4338" - integrity sha512-TDJkZvAyKIVWg5EtVqRzU97w0Rb0YVbfpqyjgu6GwXCAohVRqwZjf4fOzDE6p1Ch98Sro/8hQQi65WDXW5STPw== + version "0.5.31" + resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.31.tgz#9c40d8c5026f0c7ab46eda3d63e49c155148de05" + integrity sha512-+GgHNg8xRhMXfEbv81iDtrVeTcWt0kWmTEY1XQK14dICTXnWJnT0dxdlPspwqF3keKMVPXwayEsk1DI0AA/jdA== dependencies: moment ">= 2.9.0" -moment@2.25.3, "moment@>= 2.9.0": - version "2.25.3" - resolved "https://registry.yarnpkg.com/moment/-/moment-2.25.3.tgz#252ff41319cf41e47761a1a88cab30edfe9808c0" - integrity sha512-PuYv0PHxZvzc15Sp8ybUCoQ+xpyPWvjOuK72a5ovzp2LI32rJXOiIfyoFoYvG3s6EwwrdkMyWuRiEHSZRLJNdg== +moment@2.26.0, "moment@>= 2.9.0": + version "2.26.0" + resolved "https://registry.yarnpkg.com/moment/-/moment-2.26.0.tgz#5e1f82c6bafca6e83e808b30c8705eed0dcbd39a" + integrity 
sha512-oIixUO+OamkUkwjhAVE18rAMfRJNsNe/Stid/gwHSOfHrOtw9EhAY2AHvdKZ/k/MggcYELFCJz/Sn2pL8b8JMw== move-concurrently@^1.0.1: version "1.0.1" @@ -8457,10 +8194,10 @@ nan@^2.12.1: resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.1.tgz#d7be34dfa3105b91494c3147089315eff8874b01" integrity sha512-isWHgVjnFjh2x2yuJ/tj3JbwoHu3UC2dX5G/88Cm24yB6YopVgxvBObDY7n5xW6ExmFhJpSEQqFPvq9zaXc8Jw== -nanoid@3.1.5: - version "3.1.5" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.5.tgz#56da1bb76b619391fc61625e8b4e4bff309b9942" - integrity sha512-77yYm8wPy8igTpUQv9fA0VzEb5Ohxt5naC3zTK1oAb+u1MiyITtx0jpYrYRFfgJlefwJy2SkCaojZvxSYq6toA== +nanoid@3.1.9: + version "3.1.9" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.1.9.tgz#1f148669c70bb2072dc5af0666e46edb6cd31fb2" + integrity sha512-fFiXlFo4Wkuei3i6w9SQI6yuzGRTGi8Z2zZKZpUxv/bQlBi4jtbVPBSNFZHQA9PNjofWqtIa8p+pnsc0kgZrhQ== nanomatch@^1.2.9: version "1.2.13" @@ -8505,7 +8242,7 @@ negotiator@0.6.2: resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw== -neo-async@^2.5.0, neo-async@^2.6.0, neo-async@^2.6.1: +neo-async@2.6.1, neo-async@^2.5.0, neo-async@^2.6.0, neo-async@^2.6.1: version "2.6.1" resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.1.tgz#ac27ada66167fa8849a6addd837f6b189ad2081c" integrity sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw== @@ -8520,32 +8257,37 @@ next-tick@~1.0.0: resolved "https://registry.yarnpkg.com/next-tick/-/next-tick-1.0.0.tgz#ca86d1fe8828169b0120208e3dc8424b9db8342c" integrity sha1-yobR/ogoFpsBICCOPchCS524NCw= -next@9.3.6: - version "9.3.6" - resolved "https://registry.yarnpkg.com/next/-/next-9.3.6.tgz#2bfd76f835606db3840adf07b51b052514ef3709" - integrity sha512-b46i+qw2SYMVTOObc7GqUizt1zH1fB3T5qcOiCpl9utD7/LcWZkSxSPe7ZJtpAdFnl8V/7A58ADSvU1oxDDWug== +next@9.4.4: + 
version "9.4.4" + resolved "https://registry.yarnpkg.com/next/-/next-9.4.4.tgz#02ad9fea7f7016b6b42fc83b67835e4a0dd0c99a" + integrity sha512-ZT8bU2SAv5jkFQ+y8py+Rl5RJRJ6DnZDS+VUnB1cIscmtmUhDi7LYED7pYm4MCKkYhPbEEM1Lbpo7fnoZJGWNQ== dependencies: - "@ampproject/toolbox-optimizer" "2.2.0" - "@babel/core" "7.7.2" - "@babel/plugin-proposal-class-properties" "7.7.0" - "@babel/plugin-proposal-nullish-coalescing-operator" "7.7.4" + "@ampproject/toolbox-optimizer" "2.4.0" + "@babel/code-frame" "7.8.3" + "@babel/core" "7.7.7" + "@babel/plugin-proposal-class-properties" "7.8.3" + "@babel/plugin-proposal-nullish-coalescing-operator" "7.8.3" "@babel/plugin-proposal-numeric-separator" "7.8.3" - "@babel/plugin-proposal-object-rest-spread" "7.6.2" - "@babel/plugin-proposal-optional-chaining" "7.7.4" + "@babel/plugin-proposal-object-rest-spread" "7.9.6" + "@babel/plugin-proposal-optional-chaining" "7.9.0" "@babel/plugin-syntax-bigint" "7.8.3" - "@babel/plugin-syntax-dynamic-import" "7.2.0" - "@babel/plugin-transform-modules-commonjs" "7.7.0" - "@babel/plugin-transform-runtime" "7.6.2" - "@babel/preset-env" "7.7.1" - "@babel/preset-modules" "0.1.1" - "@babel/preset-react" "7.7.0" - "@babel/preset-typescript" "7.7.2" - "@babel/runtime" "7.7.2" - "@next/react-refresh-utils" "9.3.6" + "@babel/plugin-syntax-dynamic-import" "7.8.3" + "@babel/plugin-transform-modules-commonjs" "7.9.6" + "@babel/plugin-transform-runtime" "7.9.6" + "@babel/preset-env" "7.9.6" + "@babel/preset-modules" "0.1.3" + "@babel/preset-react" "7.9.4" + "@babel/preset-typescript" "7.9.0" + "@babel/runtime" "7.9.6" + "@babel/types" "7.9.6" + "@next/react-dev-overlay" "9.4.4" + "@next/react-refresh-utils" "9.4.4" babel-plugin-syntax-jsx "6.18.0" babel-plugin-transform-define "2.0.0" babel-plugin-transform-react-remove-prop-types "0.4.24" - browserslist "4.8.3" + browserslist "4.12.0" + cacache "13.0.1" + chokidar "2.1.8" css-loader "3.5.3" find-cache-dir "3.3.1" fork-ts-checker-webpack-plugin "3.1.1" @@ -8554,18 +8296,21 
@@ next@9.3.6: mini-css-extract-plugin "0.8.0" mkdirp "0.5.3" native-url "0.3.1" - pnp-webpack-plugin "1.5.0" - postcss "7.0.27" + neo-async "2.6.1" + pnp-webpack-plugin "1.6.4" + postcss "7.0.29" prop-types "15.7.2" prop-types-exact "1.2.0" - react-is "16.8.6" - react-refresh "0.8.1" + react-is "16.13.1" + react-refresh "0.8.3" resolve-url-loader "3.1.1" sass-loader "8.0.2" - style-loader "1.2.0" - styled-jsx "3.2.5" - use-subscription "1.1.1" + schema-utils "2.6.6" + style-loader "1.2.1" + styled-jsx "3.3.0" + use-subscription "1.4.1" watchpack "2.0.0-beta.13" + web-vitals "0.2.1" webpack "4.43.0" webpack-sources "1.4.3" @@ -8642,15 +8387,15 @@ node-libs-browser@^2.2.1: util "^0.11.0" vm-browserify "^1.0.1" -node-releases@^1.1.44, node-releases@^1.1.53: - version "1.1.55" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.55.tgz#8af23b7c561d8e2e6e36a46637bab84633b07cee" - integrity sha512-H3R3YR/8TjT5WPin/wOoHOUPHgvj8leuU/Keta/rwelEQN9pA/S2Dx8/se4pZ2LBxSd0nAGzsNzhqwa77v7F1w== +node-releases@^1.1.53: + version "1.1.56" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.56.tgz#bc054a417d316e3adac90eafb7e1932802f28705" + integrity sha512-EVo605FhWLygH8a64TjgpjyHYOihkxECwX1bHHr8tETJKWEiWS2YJjPbvsX2jFjnjTNEgBCmk9mLjKG1Mf11cw== -nodemon@2.0.3: - version "2.0.3" - resolved "https://registry.yarnpkg.com/nodemon/-/nodemon-2.0.3.tgz#e9c64df8740ceaef1cb00e1f3da57c0a93ef3714" - integrity sha512-lLQLPS90Lqwc99IHe0U94rDgvjo+G9I4uEIxRG3evSLROcqQ9hwc0AxlSHKS4T1JW/IMj/7N5mthiN58NL/5kw== +nodemon@2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/nodemon/-/nodemon-2.0.4.tgz#55b09319eb488d6394aa9818148c0c2d1c04c416" + integrity sha512-Ltced+hIfTmaS28Zjv1BM552oQ3dbwPqI4+zI0SLgq+wpJhSyqgYude/aZa/3i31VCQWMfXJVxvu86abcam3uQ== dependencies: chokidar "^3.2.2" debug "^3.2.6" @@ -8880,13 +8625,12 @@ object.assign@4.1.0, object.assign@^4.1.0: object-keys "^1.0.11" object.entries@^1.1.1: - version "1.1.1" - resolved 
"https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.1.tgz#ee1cf04153de02bb093fec33683900f57ce5399b" - integrity sha512-ilqR7BgdyZetJutmDPfXCDffGa0/Yzl2ivVNpbx/g4UeWrCdRnFDUBrKJGLhGieRHDATnyZXWBeCb29k9CJysQ== + version "1.1.2" + resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.2.tgz#bc73f00acb6b6bb16c203434b10f9a7e797d3add" + integrity sha512-BQdB9qKmb/HyNdMNWVr7O3+z5MUIx3aiegEIJqjMBbBf0YT9RRxTJSim4mzFqtyr7PDAHigq0N9dO0m0tRakQA== dependencies: define-properties "^1.1.3" - es-abstract "^1.17.0-next.1" - function-bind "^1.1.1" + es-abstract "^1.17.5" has "^1.0.3" object.fromentries@^2.0.2: @@ -8957,10 +8701,10 @@ onetime@^5.1.0: dependencies: mimic-fn "^2.1.0" -open@7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/open/-/open-7.0.3.tgz#db551a1af9c7ab4c7af664139930826138531c48" - integrity sha512-sP2ru2v0P290WFfv49Ap8MF6PkzGNnGlAwHweB4WR4mr5d2d0woiCluUeJ218w7/+PmoBy9JmYgD5A4mLcWOFA== +open@7.0.4: + version "7.0.4" + resolved "https://registry.yarnpkg.com/open/-/open-7.0.4.tgz#c28a9d315e5c98340bf979fdcb2e58664aa10d83" + integrity sha512-brSA+/yq+b08Hsr4c8fsEW2CRzk1BmfN3SAK/5VCHQ9bdoZJ4qa/+AfR0xHjlbbZUyPkUHs1b8x1RqdyZdkVqQ== dependencies: is-docker "^2.0.0" is-wsl "^2.1.1" @@ -8970,7 +8714,7 @@ opencollective-postinstall@^2.0.2: resolved "https://registry.yarnpkg.com/opencollective-postinstall/-/opencollective-postinstall-2.0.2.tgz#5657f1bede69b6e33a45939b061eb53d3c6c3a89" integrity sha512-pVOEP16TrAO2/fjej1IdOyupJY8KDUM1CvsaScRbw6oddvpQoOfGk4ywha0HKKVAD6RkW4x6Q+tNBwhf3Bgpuw== -optionator@^0.8.1, optionator@^0.8.3: +optionator@^0.8.1: version "0.8.3" resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" integrity sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA== @@ -8982,6 +8726,18 @@ optionator@^0.8.1, optionator@^0.8.3: type-check "~0.3.2" word-wrap "~1.2.3" +optionator@^0.9.1: + version "0.9.1" + resolved 
"https://registry.yarnpkg.com/optionator/-/optionator-0.9.1.tgz#4f236a6373dae0566a6d43e1326674f50c291499" + integrity sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw== + dependencies: + deep-is "^0.1.3" + fast-levenshtein "^2.0.6" + levn "^0.4.1" + prelude-ls "^1.2.1" + type-check "^0.4.0" + word-wrap "^1.2.3" + ora@4.0.4: version "4.0.4" resolved "https://registry.yarnpkg.com/ora/-/ora-4.0.4.tgz#e8da697cc5b6a47266655bf68e0fb588d29a545d" @@ -9103,6 +8859,13 @@ p-map@^2.1.0: resolved "https://registry.yarnpkg.com/p-map/-/p-map-2.1.0.tgz#310928feef9c9ecc65b68b17693018a665cea175" integrity sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw== +p-map@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-map/-/p-map-3.0.0.tgz#d704d9af8a2ba684e2600d9a215983d4141a979d" + integrity sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ== + dependencies: + aggregate-error "^3.0.0" + p-map@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/p-map/-/p-map-4.0.0.tgz#bb2f95a5eda2ec168ec9274e06a747c3e2904d2b" @@ -9446,6 +9209,11 @@ pkg-up@^2.0.0: dependencies: find-up "^2.1.0" +platform@1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/platform/-/platform-1.3.3.tgz#646c77011899870b6a0903e75e997e8e51da7461" + integrity sha1-ZGx3ARiZhwtqCQPnXpl+jlHadGE= + please-upgrade-node@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/please-upgrade-node/-/please-upgrade-node-3.2.0.tgz#aeddd3f994c933e4ad98b99d9a556efa0e2fe942" @@ -9523,17 +9291,17 @@ pm2@4.4.0: optionalDependencies: systeminformation "^4.23.3" -pnp-webpack-plugin@1.5.0: - version "1.5.0" - resolved "https://registry.yarnpkg.com/pnp-webpack-plugin/-/pnp-webpack-plugin-1.5.0.tgz#62a1cd3068f46d564bb33c56eb250e4d586676eb" - integrity sha512-jd9olUr9D7do+RN8Wspzhpxhgp1n6Vd0NtQ4SFkmIACZoEL1nkyAdW9Ygrinjec0vgDcWjscFQQ1gDW8rsfKTg== 
+pnp-webpack-plugin@1.6.4: + version "1.6.4" + resolved "https://registry.yarnpkg.com/pnp-webpack-plugin/-/pnp-webpack-plugin-1.6.4.tgz#c9711ac4dc48a685dabafc86f8b6dd9f8df84149" + integrity sha512-7Wjy+9E3WwLOEL30D+m8TSTF7qJJUJLONBnwQp0518siuMxUQUbgZwssaFX+QKlZkjHZcw/IpZCt/H0srrntSg== dependencies: - ts-pnp "^1.1.2" + ts-pnp "^1.1.6" -polished@3.6.2: - version "3.6.2" - resolved "https://registry.yarnpkg.com/polished/-/polished-3.6.2.tgz#bae682806da6400596a851ea443178ff6eec1dd2" - integrity sha512-V0dyaVJUM5e5wIhLHvZyyE9PhXoI0AlGT6RDww1V/v+STsocLsVGWmi+9OKTL49oaQj85XFDvsWi/uHQJ0rpWg== +polished@3.6.4: + version "3.6.4" + resolved "https://registry.yarnpkg.com/polished/-/polished-3.6.4.tgz#cec6bc0fbffc5d6ce5799c85bcc1bca5e63f1dee" + integrity sha512-21moJXCm/7EvjeKQz5w89QDDKNPCoimc83CqwZZGJluFdMXsFlMQl9lPA/OMRkoceZ19kU0anKlMgZmY7LJSJw== dependencies: "@babel/runtime" "^7.9.2" @@ -9866,10 +9634,10 @@ postcss@7.0.21: source-map "^0.6.1" supports-color "^6.1.0" -postcss@7.0.27: - version "7.0.27" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.27.tgz#cc67cdc6b0daa375105b7c424a85567345fc54d9" - integrity sha512-WuQETPMcW9Uf1/22HWUWP9lgsIC+KEHg2kozMflKjbeUtw9ujvFX6QmIfozaErDkmLWS9WEnEdEe6Uo9/BNTdQ== +postcss@7.0.29: + version "7.0.29" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.29.tgz#d3a903872bd52280b83bce38cdc83ce55c06129e" + integrity sha512-ba0ApvR3LxGvRMMiUa9n0WR4HjzcYm7tS+ht4/2Nd0NLtHpPIH77fuB9Xh1/yJVz9O/E/95Y/dn8ygWsyffXtw== dependencies: chalk "^2.4.2" source-map "^0.6.1" @@ -9884,6 +9652,11 @@ postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.14, postcss@^7.0.16, postcss@^7.0.2 source-map "^0.6.1" supports-color "^6.1.0" +prelude-ls@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" + integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== + prelude-ls@~1.1.2: version "1.1.2" resolved 
"https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" @@ -10036,9 +9809,9 @@ psl@^1.1.28: integrity sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ== pstree.remy@^1.1.7: - version "1.1.7" - resolved "https://registry.yarnpkg.com/pstree.remy/-/pstree.remy-1.1.7.tgz#c76963a28047ed61542dc361aa26ee55a7fa15f3" - integrity sha512-xsMgrUwRpuGskEzBFkH8NmTimbZ5PcPup0LA8JJkHIm2IMUbQcpo3yeLNWVrufEYjh8YwtSVh0xz6UeWc5Oh5A== + version "1.1.8" + resolved "https://registry.yarnpkg.com/pstree.remy/-/pstree.remy-1.1.8.tgz#c242224f4a67c21f686839bbdb4ac282b8373d3a" + integrity sha512-77DZwxQmxKnu3aR542U+X8FypNzbfJ+C5XQDk3uWjWxn6151aIMGthWYRXTqT1E5oJvg+ljaa2OJi+VfvCOQ8w== public-encrypt@^4.0.0: version "4.0.3" @@ -10216,10 +9989,10 @@ react-hooks-worker@0.9.0: resolved "https://registry.yarnpkg.com/react-hooks-worker/-/react-hooks-worker-0.9.0.tgz#cf6e481711045d539368c83ba0fa42bd97c71a09" integrity sha512-aDKlrc9Dh8O0Wag2mWbNpuXbTB/kX1tGzq74bFdxSfKg6KHvF9ft789WpatmCBQbszdgXEi3pS/BCj698JXCJQ== -react-i18next@11.4.0: - version "11.4.0" - resolved "https://registry.yarnpkg.com/react-i18next/-/react-i18next-11.4.0.tgz#dde6bf3a695910af7a4270fea2e111bc331cf151" - integrity sha512-lyOZSSQkif4H9HnHN3iEKVkryLI+WkdZSEw3VAZzinZLopfYRMHVY5YxCopdkXPLEHs6S5GjKYPh3+j0j336Fg== +react-i18next@11.5.0: + version "11.5.0" + resolved "https://registry.yarnpkg.com/react-i18next/-/react-i18next-11.5.0.tgz#84a9bb535d44c0c1b336b94de164515c2cc2a714" + integrity sha512-V6rUT7MzYBdFCgUrhfr78FHRfnY3CFoR75ET9EP5Py5UPHKyaGiK1MvPx03TesLwsmIaVHlRFU/WLzqCedXevA== dependencies: "@babel/runtime" "^7.3.1" html-parse-stringify2 "2.0.1" @@ -10237,15 +10010,10 @@ react-is@16.13.1, react-is@^16.7.0, react-is@^16.8.1: resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity 
sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== -react-is@16.8.6: - version "16.8.6" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.8.6.tgz#5bbc1e2d29141c9fbdfed456343fe2bc430a6a16" - integrity sha512-aUk3bHfZ2bRSVFFbbeVS4i+lNPZr3/WM5jT2J5omUVV1zzcs1nAaf3l51ctA5FFvCRbhrH0bdAsRRQddFJZPtA== - -react-refresh@0.8.1: - version "0.8.1" - resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.8.1.tgz#5500506ad6fc891fdd057d0bf3581f9310abc6a2" - integrity sha512-xZIKi49RtLUUSAZ4a4ut2xr+zr4+glOD5v0L413B55MPvlg4EQ6Ctx8PD4CmjlPGoAWmSCTmmkY59TErizNsow== +react-refresh@0.8.3: + version "0.8.3" + resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.8.3.tgz#721d4657672d400c5e3c75d063c4a85fb2d5d68f" + integrity sha512-X8jZHc7nCMjaCqoU+V2I0cOhNW+QMBwSUkeXnTi8IPe6zaRWfn60ZzvFDZqWPfmSJfjub7dDW1SP0jaHWLu/hg== react-spinners@0.8.3: version "0.8.3" @@ -10472,7 +10240,7 @@ regenerate@^1.4.0: resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.0.tgz#4a856ec4b56e4077c557589cae85e7a4c8869a11" integrity sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg== -regenerator-runtime@^0.13.2, regenerator-runtime@^0.13.4: +regenerator-runtime@^0.13.4: version "0.13.5" resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.5.tgz#d878a1d094b4306d10b9096484b33ebd55e26697" integrity sha512-ZS5w8CpKFinUzOwW3c83oPeVXoNsrLsaCoLtJvAClH135j/R77RuymhiSErhm2lKcwSCIpmvIWSbDkIfAqKQlA== @@ -10506,12 +10274,7 @@ regexp.prototype.flags@^1.3.0: define-properties "^1.1.3" es-abstract "^1.17.0-next.1" -regexpp@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-2.0.1.tgz#8d19d31cf632482b589049f8281f93dbcba4d07f" - integrity sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw== - -regexpp@^3.0.0: +regexpp@^3.0.0, regexpp@^3.1.0: version "3.1.0" resolved 
"https://registry.yarnpkg.com/regexpp/-/regexpp-3.1.0.tgz#206d0ad0a5648cffbdb8ae46438f3dc51c9f78e2" integrity sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q== @@ -10749,7 +10512,7 @@ rimraf@3.0.2, rimraf@^3.0.0: dependencies: glob "^7.1.3" -rimraf@^2.5.4, rimraf@^2.6.2, rimraf@^2.6.3: +rimraf@^2.5.4, rimraf@^2.6.2, rimraf@^2.6.3, rimraf@^2.7.1: version "2.7.1" resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec" integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== @@ -10781,12 +10544,7 @@ run-series@^1.1.8: resolved "https://registry.yarnpkg.com/run-series/-/run-series-1.1.8.tgz#2c4558f49221e01cd6371ff4e0a1e203e460fc36" integrity sha512-+GztYEPRpIsQoCSraWHDBs9WVy4eVME16zhOtDB4H9J4xN0XRhknnmLOl+4gRgZtu8dpp9N/utSPjKH/xmDzXg== -rw@1: - version "1.3.3" - resolved "https://registry.yarnpkg.com/rw/-/rw-1.3.3.tgz#3f862dfa91ab766b14885ef4d01124bfda074fb4" - integrity sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q= - -rxjs@^6.3.3, rxjs@^6.4.0, rxjs@^6.5.3: +rxjs@^6.4.0, rxjs@^6.5.3, rxjs@^6.5.5: version "6.5.5" resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.5.5.tgz#c5c884e3094c8cfee31bf27eb87e54ccfc87f9ec" integrity sha512-WfQI+1gohdf0Dai/Bbmk5L5ItH5tYqm3ki2c5GdWhKjalzjg93N3avFjVStyZZz+A2Em+ZxKH5bNghw9UeylGQ== @@ -10844,6 +10602,14 @@ scheduler@^0.19.1: loose-envify "^1.1.0" object-assign "^4.1.1" +schema-utils@2.6.6, schema-utils@^2.6.1, schema-utils@^2.6.6: + version "2.6.6" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-2.6.6.tgz#299fe6bd4a3365dc23d99fd446caff8f1d6c330c" + integrity sha512-wHutF/WPSbIi9x6ctjGGk2Hvl0VOz5l3EKEuKbjPlB30mKZUzb9A5k9yEXRX3pwyqVLPvpfZZEllaFq/M718hA== + dependencies: + ajv "^6.12.0" + ajv-keywords "^3.4.1" + schema-utils@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-1.0.0.tgz#0b79a93204d7b600d4b2850d1f66c2a34951c770" @@ -10853,14 
+10619,6 @@ schema-utils@^1.0.0: ajv-errors "^1.0.0" ajv-keywords "^3.1.0" -schema-utils@^2.6.1, schema-utils@^2.6.6: - version "2.6.6" - resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-2.6.6.tgz#299fe6bd4a3365dc23d99fd446caff8f1d6c330c" - integrity sha512-wHutF/WPSbIi9x6ctjGGk2Hvl0VOz5l3EKEuKbjPlB30mKZUzb9A5k9yEXRX3pwyqVLPvpfZZEllaFq/M718hA== - dependencies: - ajv "^6.12.0" - ajv-keywords "^3.4.1" - semver-compare@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/semver-compare/-/semver-compare-1.0.0.tgz#0dee216a1c941ab37e9efb1788f6afc5ff5537fc" @@ -10883,7 +10641,7 @@ semver-regex@^2.0.0: resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== -semver@6.3.0, semver@^6.0.0, semver@^6.1.2, semver@^6.2.0, semver@^6.3.0: +semver@6.3.0, semver@^6.0.0, semver@^6.2.0, semver@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== @@ -10893,7 +10651,7 @@ semver@7.0.0: resolved "https://registry.yarnpkg.com/semver/-/semver-7.0.0.tgz#5f3ca35761e47e05b206c6daff2cf814f0316b8e" integrity sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A== -semver@^7.2: +semver@^7.2, semver@^7.2.1, semver@^7.3.2: version "7.3.2" resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.2.tgz#604962b052b81ed0786aae84389ffba70ffd3938" integrity sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ== @@ -11006,6 +10764,11 @@ shebang-regex@^3.0.0: resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" integrity 
sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== +shell-quote@1.7.2: + version "1.7.2" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.7.2.tgz#67a7d02c76c9da24f99d20808fcaded0e0e04be2" + integrity sha512-mRz/m/JVscCrkMyPqHc/bczi3OQHkLTqXHEFu0zDhK/qfv3UcOA4SVmRCLmos4bhjr9ekVQubj/R7waKapmiQg== + shelljs@0.8.4: version "0.8.4" resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.4.tgz#de7684feeb767f8716b326078a8a00875890e3c2" @@ -11194,15 +10957,22 @@ source-map@0.7.3: resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.3.tgz#5302f8169031735226544092e64981f751750383" integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ== +source-map@0.8.0-beta.0: + version "0.8.0-beta.0" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.8.0-beta.0.tgz#d4c1bb42c3f7ee925f005927ba10709e0d1d1f11" + integrity sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA== + dependencies: + whatwg-url "^7.0.0" + source-map@^0.5.0, source-map@^0.5.6, source-map@^0.5.7: version "0.5.7" resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= spdx-correct@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.0.tgz#fb83e504445268f154b074e218c87c003cd31df4" - integrity sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q== + version "3.1.1" + resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.1.tgz#dece81ac9c1e6713e5f7d1b6f17d468fa53d89a9" + integrity sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w== dependencies: spdx-expression-parse "^3.0.0" spdx-license-ids "^3.0.0" @@ -11283,11 +11053,26 @@ ssri@^6.0.0, ssri@^6.0.1: dependencies: figgy-pudding "^3.5.1" 
+ssri@^7.0.0: + version "7.1.0" + resolved "https://registry.yarnpkg.com/ssri/-/ssri-7.1.0.tgz#92c241bf6de82365b5c7fb4bd76e975522e1294d" + integrity sha512-77/WrDZUWocK0mvA5NTRQyveUf+wsrIc6vyrxpS8tVvYBcX215QbafrJR3KtkpskIzoFLqqNuuYQvxaMjXJ/0g== + dependencies: + figgy-pudding "^3.5.1" + minipass "^3.1.1" + stable@^0.1.8: version "0.1.8" resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== +stacktrace-parser@0.1.10: + version "0.1.10" + resolved "https://registry.yarnpkg.com/stacktrace-parser/-/stacktrace-parser-0.1.10.tgz#29fb0cae4e0d0b85155879402857a1639eb6051a" + integrity sha512-KJP1OCML99+8fhOHxwwzyWrlUuVX5GQ0ZpJTd1DFXhdkrvg1szxfHhawXUZ3g9TkXORQd4/WG68jMlQZ2p8wlg== + dependencies: + type-fest "^0.7.1" + static-extend@^0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" @@ -11462,6 +11247,13 @@ stringify-object@^3.3.0: is-obj "^1.0.1" is-regexp "^1.0.0" +strip-ansi@6.0.0, strip-ansi@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" + integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== + dependencies: + ansi-regex "^5.0.0" + strip-ansi@^3.0.0, strip-ansi@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" @@ -11483,13 +11275,6 @@ strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" - integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== - 
dependencies: - ansi-regex "^5.0.0" - strip-bom@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-2.0.0.tgz#6219a85616520491f35788bdbf1447a99c7e6b0e" @@ -11536,7 +11321,7 @@ strip-json-comments@2.0.1, strip-json-comments@~2.0.1: resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= -strip-json-comments@^3.0.1: +strip-json-comments@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.0.tgz#7638d31422129ecf4457440009fba03f9f9ac180" integrity sha512-e6/d0eBu7gHtdCqFt0xJr642LdToM5/cN4Qb9DbHjVx1CP5RyeM+zH7pbecEmDv/lBqb0QH+6Uqq75rxFPkM0w== @@ -11550,18 +11335,18 @@ strong-log-transformer@^2.0.0: minimist "^1.2.0" through "^2.3.4" -style-loader@1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-1.2.0.tgz#f78e4d49caf5018f7c03ae1886e1270124feeb0a" - integrity sha512-HC8WcGnjwNrKji7HSBqFOhGNUSt7UDU/jHxT6bA83Gk+JWJBmgitWlGihc0V1w6ZvwlzcX5LJOsofZzSP7b1tQ== +style-loader@1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-1.2.1.tgz#c5cbbfbf1170d076cfdd86e0109c5bba114baa1a" + integrity sha512-ByHSTQvHLkWE9Ir5+lGbVOXhxX10fbprhLvdg96wedFZb4NDekDPxVKv5Fwmio+QcMlkkNfuK+5W1peQ5CUhZg== dependencies: loader-utils "^2.0.0" schema-utils "^2.6.6" -styled-components@5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/styled-components/-/styled-components-5.1.0.tgz#2e3985b54f461027e1c91af3229e1c2530872a4e" - integrity sha512-0Qs2wEkFBXHFlysz6CV831VG6HedcrFUwChjnWylNivsx14MtmqQsohi21rMHZxzuTba063dEyoe/SR6VGJI7Q== +styled-components@5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/styled-components/-/styled-components-5.1.1.tgz#96dfb02a8025794960863b9e8e365e3b6be5518d" + integrity 
sha512-1ps8ZAYu2Husx+Vz8D+MvXwEwvMwFv+hqqUwhNlDN5ybg6A+3xyW1ECrAgywhvXapNfXiz79jJyU0x22z0FFTg== dependencies: "@babel/helper-module-imports" "^7.0.0" "@babel/traverse" "^7.4.5" @@ -11574,10 +11359,10 @@ styled-components@5.1.0: shallowequal "^1.1.0" supports-color "^5.5.0" -styled-jsx@3.2.5: - version "3.2.5" - resolved "https://registry.yarnpkg.com/styled-jsx/-/styled-jsx-3.2.5.tgz#0172a3e13a0d6d8bf09167dcaf32cf7102d932ca" - integrity sha512-prEahkYwQHomUljJzXzrFnBmQrSMtWOBbXn8QeEkpfFkqMZQGshxzzp4H8ebBIsbVlHF/3+GSXMnmK/fp7qVYQ== +styled-jsx@3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/styled-jsx/-/styled-jsx-3.3.0.tgz#32335c1a3ecfc923ba4f9c056eeb3d4699006b09" + integrity sha512-sh8BI5eGKyJlwL4kNXHjb27/a/GJV8wP4ElRIkRXrGW3sHKOsY9Pa1VZRNxyvf3+lisdPwizD9JDkzVO9uGwZw== dependencies: "@babel/types" "7.8.3" babel-plugin-syntax-jsx "6.18.0" @@ -11659,17 +11444,17 @@ svgo@^1.0.0: unquote "~1.1.1" util.promisify "~1.0.0" -swr@0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/swr/-/swr-0.2.0.tgz#2088271bc0a8044089cf2badde013648f98783ee" - integrity sha512-8IZCdM0deUPhDiqOmyaj0BsnNjav1fu83nD0d07PEAzOHOn+lxcJOxwXeDBShwF6qCeZ8u8ab+a2yXkjD8yT3A== +swr@0.2.2: + version "0.2.2" + resolved "https://registry.yarnpkg.com/swr/-/swr-0.2.2.tgz#6e1b3e5c0e545c4fdb36ae3aa38cd94d0f9a88b7" + integrity sha512-D/z+PTUchZhoUA0tNC8TNJivf7Hc61WPxbUdXPi+VxRloddWYNP1ZicaEgyAph42ZnKl1L7twcZr4q6d0UMXcg== dependencies: fast-deep-equal "2.0.1" systeminformation@^4.23.3: - version "4.26.1" - resolved "https://registry.yarnpkg.com/systeminformation/-/systeminformation-4.26.1.tgz#040aa4f5d024e7e52daea3cbb977c08ff8b06d3d" - integrity sha512-1C4qjucOXYbVNOTnGaujL1bcKyVoEnNv9BTAwDyYaGhNe9mnJrxV1erBKvRxnlp1JBn+jPICeTAQ/TzlEJ5lsg== + version "4.26.4" + resolved "https://registry.yarnpkg.com/systeminformation/-/systeminformation-4.26.4.tgz#ec855d64f3e28622788a96c7dcabb6b051def50a" + integrity 
sha512-4+AYe0SfjdQPHEFL0nAyFMWyBUe8c5DZdSHApeJrdAvem5yoE/eS7/dGChnKLAkr+AKAoKcnqucPv302jy2+aA== table@^5.2.3: version "5.4.6" @@ -11748,19 +11533,19 @@ terser-webpack-plugin@^1.4.3: webpack-sources "^1.4.0" worker-farm "^1.7.0" -terser@4.6.8: - version "4.6.8" - resolved "https://registry.yarnpkg.com/terser/-/terser-4.6.8.tgz#62ccb14a52f102418ad6061dfef45076f13e5fa2" - integrity sha512-drV7ga6ZlIpBtitvb87Uk7P7gAJkCt3j/TqZr9wwF4Dlt0MBn52ANIAyuvP1F605WdPY4w6vT63u6KTWqaXFRQ== +terser@4.6.13: + version "4.6.13" + resolved "https://registry.yarnpkg.com/terser/-/terser-4.6.13.tgz#e879a7364a5e0db52ba4891ecde007422c56a916" + integrity sha512-wMvqukYgVpQlymbnNbabVZbtM6PN63AzqexpwJL8tbh/mRT9LE5o+ruVduAGL7D6Fpjl+Q+06U5I9Ul82odAhw== dependencies: commander "^2.20.0" source-map "~0.6.1" source-map-support "~0.5.12" terser@^4.1.2: - version "4.6.13" - resolved "https://registry.yarnpkg.com/terser/-/terser-4.6.13.tgz#e879a7364a5e0db52ba4891ecde007422c56a916" - integrity sha512-wMvqukYgVpQlymbnNbabVZbtM6PN63AzqexpwJL8tbh/mRT9LE5o+ruVduAGL7D6Fpjl+Q+06U5I9Ul82odAhw== + version "4.7.0" + resolved "https://registry.yarnpkg.com/terser/-/terser-4.7.0.tgz#15852cf1a08e3256a80428e865a2fa893ffba006" + integrity sha512-Lfb0RiZcjRDXCC3OSHJpEkxJ9Qeqs6mp2v4jf2MHfy8vGERmVDuvjXdd/EnP5Deme5F2yBRBymKmKHCBg2echw== dependencies: commander "^2.20.0" source-map "~0.6.1" @@ -11933,10 +11718,10 @@ trim-off-newlines@^1.0.0: resolved "https://registry.yarnpkg.com/trim-off-newlines/-/trim-off-newlines-1.0.1.tgz#9f9ba9d9efa8764c387698bcbfeb2c848f11adb3" integrity sha1-n5up2e+odkw4dpi8v+sshI8RrbM= -ts-loader@7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/ts-loader/-/ts-loader-7.0.3.tgz#1ba06fd3dae612ecf8b952f89145f9ac7489805f" - integrity sha512-BXAHfPjm3J//20ibuI30M+xgLpdIng68p2H952QqbbmDk7SW72HV42k9Gop7rMxuHvrXWjazWhKuyr9D9kKe3A== +ts-loader@7.0.5: + version "7.0.5" + resolved "https://registry.yarnpkg.com/ts-loader/-/ts-loader-7.0.5.tgz#789338fb01cb5dc0a33c54e50558b34a73c9c4c5" + 
integrity sha512-zXypEIT6k3oTc+OZNx/cqElrsbBtYqDknf48OZos0NQ3RTt045fBIU8RRSu+suObBzYB355aIPGOe/3kj9h7Ig== dependencies: chalk "^2.3.0" enhanced-resolve "^4.0.0" @@ -11944,10 +11729,10 @@ ts-loader@7.0.3: micromatch "^4.0.0" semver "^6.0.0" -ts-node@8.10.1: - version "8.10.1" - resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-8.10.1.tgz#77da0366ff8afbe733596361d2df9a60fc9c9bd3" - integrity sha512-bdNz1L4ekHiJul6SHtZWs1ujEKERJnHs4HxN7rjTyyVOFf3HaJ6sLqe6aPG62XTzAB/63pKRh5jTSWL0D7bsvw== +ts-node@8.10.2: + version "8.10.2" + resolved "https://registry.yarnpkg.com/ts-node/-/ts-node-8.10.2.tgz#eee03764633b1234ddd37f8db9ec10b75ec7fb8d" + integrity sha512-ISJJGgkIpDdBhWVu3jufsWpK3Rzo7bdiIXJjQc0ynKxVOVcg2oIrf2H2cejminGrptVc6q6/uynAHNCuWGbpVA== dependencies: arg "^4.1.0" diff "^4.0.1" @@ -11955,7 +11740,7 @@ ts-node@8.10.1: source-map-support "^0.5.17" yn "3.1.1" -ts-pnp@^1.1.2: +ts-pnp@^1.1.6: version "1.2.0" resolved "https://registry.yarnpkg.com/ts-pnp/-/ts-pnp-1.2.0.tgz#a500ad084b0798f1c3071af391e65912c86bca92" integrity sha512-csd+vJOb/gkzvcCHgTGSChYpy5f1/XKNsmvBGO4JXS+z1v2HobugDz4s1IeFXM3wZB44uczs+eazB5Q/ccdhQw== @@ -11999,6 +11784,13 @@ tweetnacl@^0.14.3, tweetnacl@~0.14.0: resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= +type-check@^0.4.0, type-check@~0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" + integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== + dependencies: + prelude-ls "^1.2.1" + type-check@~0.3.2: version "0.3.2" resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" @@ -12026,6 +11818,11 @@ type-fest@^0.6.0: resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.6.0.tgz#8d2a2370d3df886eb5c90ada1c5bf6188acf838b" integrity 
sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg== +type-fest@^0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.7.1.tgz#8dda65feaf03ed78f0a3f9678f1869147f7c5c48" + integrity sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg== + type-fest@^0.8.1: version "0.8.1" resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d" @@ -12061,10 +11858,10 @@ typedarray@^0.0.6: resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= -typescript@3.8.3: - version "3.8.3" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.8.3.tgz#409eb8544ea0335711205869ec458ab109ee1061" - integrity sha512-MYlEfn5VrLNsgudQTVJeNaQFUAI7DkhnOjdpAp4T+ku1TfQClewlbSuTVHiA+8skNBgaf02TL/kLOvig4y3G8w== +typescript@3.9.3: + version "3.9.3" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.9.3.tgz#d3ac8883a97c26139e42df5e93eeece33d610b8a" + integrity sha512-D/wqnB2xzNFIcoBG9FG8cXRDjiqSTbG2wd8DMZeQyJlP1vfTkIxH4GKveWaEBYySKIg+USu+E+EDIR47SqnaMQ== uglify-js@^3.1.4: version "3.9.3" @@ -12255,10 +12052,12 @@ url@0.11.0, url@^0.11.0: punycode "1.3.2" querystring "0.2.0" -use-subscription@1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/use-subscription/-/use-subscription-1.1.1.tgz#5509363e9bb152c4fb334151d4dceb943beaa7bb" - integrity sha512-gk4fPTYvNhs6Ia7u8/+K7bM7sZ7O7AMfWtS+zPO8luH+zWuiGgGcrW0hL4MRWZSzXo+4ofNorf87wZwBKz2YdQ== +use-subscription@1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/use-subscription/-/use-subscription-1.4.1.tgz#edcbcc220f1adb2dd4fa0b2f61b6cc308e620069" + integrity sha512-7+IIwDG/4JICrWHL/Q/ZPK5yozEnvRm6vHImu0LKwQlmWGKeiF7mbAenLlK/cTNXrTtXHU/SFASQHzB6+oSJMQ== + dependencies: + object-assign "^4.1.1" use@^3.1.0: version "3.1.1" @@ -12384,6 
+12183,13 @@ wasm-pack@0.9.1: dependencies: binary-install "0.0.1" +watchpack-chokidar2@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/watchpack-chokidar2/-/watchpack-chokidar2-2.0.0.tgz#9948a1866cbbd6cb824dea13a7ed691f6c8ddff0" + integrity sha512-9TyfOyN/zLUbA288wZ8IsMZ+6cbzvsNyEzSBp6e/zkifi6xxbl8SmQ/CxQq32k8NNqrdVEVUVSEf56L4rQ/ZxA== + dependencies: + chokidar "^2.1.8" + watchpack@2.0.0-beta.13: version "2.0.0-beta.13" resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.0.0-beta.13.tgz#9d9b0c094b8402139333e04eb6194643c8384f55" @@ -12393,13 +12199,15 @@ watchpack@2.0.0-beta.13: graceful-fs "^4.1.2" watchpack@^1.6.1: - version "1.6.1" - resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.6.1.tgz#280da0a8718592174010c078c7585a74cd8cd0e2" - integrity sha512-+IF9hfUFOrYOOaKyfaI7h7dquUIOgyEMoQMLA7OP5FxegKA2+XdXThAZ9TU2kucfhDH7rfMHs1oPYziVGWRnZA== + version "1.7.2" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.7.2.tgz#c02e4d4d49913c3e7e122c3325365af9d331e9aa" + integrity sha512-ymVbbQP40MFTp+cNMvpyBpBtygHnPzPkHqoIwRRj/0B8KhqQwV8LaKjtbaxF2lK4vl8zN9wCxS46IFCU5K4W0g== dependencies: - chokidar "^2.1.8" graceful-fs "^4.1.2" neo-async "^2.5.0" + optionalDependencies: + chokidar "^3.4.0" + watchpack-chokidar2 "^2.0.0" wcwidth@^1.0.0, wcwidth@^1.0.1: version "1.0.1" @@ -12408,6 +12216,11 @@ wcwidth@^1.0.0, wcwidth@^1.0.1: dependencies: defaults "^1.0.3" +web-vitals@0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/web-vitals/-/web-vitals-0.2.1.tgz#60782fa690243fe35613759a0c26431f57ba7b2d" + integrity sha512-2pdRlp6gJpOCg0oMMqwFF0axjk5D9WInc09RSYtqFgPXQ15+YKNQ7YnBBEqAL5jvmfH9WvoXDMb8DHwux7pIew== + webidl-conversions@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad" @@ -12545,7 +12358,7 @@ windows-release@^3.1.0: dependencies: execa "^1.0.0" -word-wrap@~1.2.3: +word-wrap@^1.2.3, word-wrap@~1.2.3: 
version "1.2.3" resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.3.tgz#610636f6b1f703891bd34771ccb17fb93b47079c" integrity sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ== @@ -12712,11 +12525,9 @@ yallist@^4.0.0: integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== yaml@^1.7.2: - version "1.9.2" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.9.2.tgz#f0cfa865f003ab707663e4f04b3956957ea564ed" - integrity sha512-HPT7cGGI0DuRcsO51qC1j9O16Dh1mZ2bnXwsi0jrSpsLz0WxOLSLXfkABVl6bZO629py3CU+OMJtpNHDLB97kg== - dependencies: - "@babel/runtime" "^7.9.2" + version "1.10.0" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.0.tgz#3b593add944876077d4d683fee01081bd9fff31e" + integrity sha512-yr2icI4glYaNG+KWONODapy2/jDdMSDnrONSjblABjD9B4Z5LgiircSt8m8sRZFNi08kG9Sm0uSHtEmP3zaEGg== yamljs@0.3.0: version "0.3.0" @@ -12836,12 +12647,7 @@ yn@3.1.1: resolved "https://registry.yarnpkg.com/yn/-/yn-3.1.1.tgz#1e87401a09d767c1d5eab26a6e4c185182d2eb50" integrity sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q== -zrender@4.3.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/zrender/-/zrender-4.3.0.tgz#9f056121b20bbae44414d287bf6a119ff7042661" - integrity sha512-Dii6j2bDsPkxQayuVf2DXJeruIB/mKVxxcGRZQ9GExiBd4c3w7+oBuvo1O/JGHeFeA1nCmSDVDs/S7yKZG1nrA== - -zrender@^4.0.4: +zrender@4.3.1, zrender@^4.0.4: version "4.3.1" resolved "https://registry.yarnpkg.com/zrender/-/zrender-4.3.1.tgz#baf8aa6dc8187a2f819692d7d5f9bedfa2b90fa3" integrity sha512-CeH2TpJeCdG0TAGYoPSAcFX2ogdug1K7LIn9UO/q9HWqQ54gWhrMAlDP9AwWYMUDhrPe4VeazQ4DW3msD96nUQ== diff --git a/visualdl/server/__init__.py b/visualdl/server/__init__.py index 65a494a1..09b6fc7d 100644 --- a/visualdl/server/__init__.py +++ b/visualdl/server/__init__.py @@ -14,6 +14,6 @@ # ======================================================================= from __future__ import 
absolute_import -from . import log +from . import (log, app, api) -__all__ = ['log'] +__all__ = ['log', 'app', 'api'] diff --git a/visualdl/server/api.py b/visualdl/server/api.py new file mode 100644 index 00000000..355ddbed --- /dev/null +++ b/visualdl/server/api.py @@ -0,0 +1,165 @@ +#!/user/bin/env python + +# Copyright (c) 2017 VisualDL Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ======================================================================= + +import functools +import json +import os + +from visualdl.reader.reader import LogReader +from visualdl.server import lib +from visualdl.server.log import logger +from visualdl.python.cache import MemCache + + +error_retry_times = 3 +error_sleep_time = 2 # seconds + + +def gen_result(data=None, status=0, msg=''): + return { + 'status': status, + 'msg': msg, + 'data': data + } + + +def result(mimetype='application/json'): + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + data = func(*args, **kwargs) + if mimetype == 'application/json': + data = json.dumps(gen_result(data)) + return data, mimetype + return wrapper + return decorator + + +def try_call(function, *args, **kwargs): + res = lib.retry(error_retry_times, function, error_sleep_time, *args, **kwargs) + if not res: + logger.error("Internal server error. 
Retry later.") + return res + + +class Api(object): + def __init__(self, logdir, cache_timeout): + self._reader = LogReader(logdir) + + # use a memory cache to reduce disk reading frequency. + cache = MemCache(timeout=cache_timeout) + self._cache = lib.cache_get(cache) + + def _get(self, key, func, *args, **kwargs): + return self._cache(key, func, self._reader, *args, **kwargs) + + def _get_with_retry(self, key, func, *args, **kwargs): + return self._cache(key, try_call, func, self._reader, *args, **kwargs) + + @result() + def components(self): + return self._get('data/components', lib.get_components) + + @result() + def runs(self): + return self._get('data/runs', lib.get_runs) + + @result() + def tags(self): + return self._get('data/tags', lib.get_tags) + + @result() + def logs(self): + return self._get('data/logs', lib.get_logs) + + @result() + def scalars_tags(self): + return self._get_with_retry('data/plugin/scalars/tags', lib.get_scalar_tags) + + @result() + def images_tags(self): + return self._get_with_retry('data/plugin/images/tags', lib.get_image_tags) + + @result() + def audio_tags(self): + return self._get_with_retry('data/plugin/audio/tags', lib.get_audio_tags) + + @result() + def embeddings_tags(self): + return self._get_with_retry('data/plugin/embeddings/tags', lib.get_embeddings_tags) + + @result() + def scalars_list(self, run, tag): + key = os.path.join('data/plugin/scalars/scalars', run, tag) + return self._get_with_retry(key, lib.get_scalar, run, tag) + + @result() + def images_list(self, mode, tag): + key = os.path.join('data/plugin/images/images', mode, tag) + return self._get_with_retry(key, lib.get_image_tag_steps, mode, tag) + + @result('image/png') + def images_image(self, mode, tag, index=0): + index = int(index) + key = os.path.join('data/plugin/images/individualImage', mode, tag, str(index)) + return self._get_with_retry(key, lib.get_individual_image, mode, tag, index) + + @result() + def audio_list(self, run, tag): + key = 
os.path.join('data/plugin/audio/audio', run, tag) + return self._get_with_retry(key, lib.get_audio_tag_steps, run, tag) + + def audio_audio(self, run, tag, index=0): + index = int(index) + key = os.path.join('data/plugin/audio/individualAudio', run, tag, str(index)) + return self._get_with_retry(key, lib.get_individual_audio, run, tag, index) + + @result() + def embeddings_embedding(self, run, tag='default', reduction='pca', dimension=2): + dimension = int(dimension) + key = os.path.join('data/plugin/embeddings/embeddings', run, str(dimension), reduction) + return self._get_with_retry(key, lib.get_embeddings, run, tag, reduction, dimension) + + +def create_api_call(logdir, cache_timeout): + api = Api(logdir, cache_timeout) + routes = { + 'components': (api.components, []), + 'runs': (api.runs, []), + 'tags': (api.tags, []), + 'logs': (api.logs, []), + 'scalars/tags': (api.scalars_tags, []), + 'images/tags': (api.images_tags, []), + 'audio/tags': (api.audio_tags, []), + 'embeddings/tags': (api.embeddings_tags, []), + 'scalars/list': (api.scalars_list, ['run', 'tag']), + 'images/list': (api.images_list, ['run', 'tag']), + 'images/image': (api.images_image, ['run', 'tag', 'index']), + 'audio/list': (api.audio_list, ['run', 'tag']), + 'audio/audio': (api.audio_audio, ['run', 'tag', 'index']), + 'embeddings/embedding': (api.embeddings_embedding, ['run', 'tag', 'reduction', 'dimension']) + } + + def call(path: str, args): + route = routes.get(path) + if not route: + return gen_result(status=1, msg='api not found') + method, call_arg_names = route + call_args = [args.get(name) for name in call_arg_names] + return method(*call_args) + + return call diff --git a/visualdl/server/app.py b/visualdl/server/app.py index e282a97d..a3661012 100644 --- a/visualdl/server/app.py +++ b/visualdl/server/app.py @@ -15,7 +15,6 @@ # limitations under the License. 
# ======================================================================= -import json import os import time import sys @@ -24,22 +23,17 @@ import threading import re import webbrowser import requests -from visualdl.reader.reader import LogReader from visualdl.utils import update_util -from flask import (Flask, Response, redirect, request, send_file) +from flask import (Flask, Response, redirect, request, send_file, make_response) from flask_babel import Babel import visualdl.server -from visualdl.server import lib +from visualdl.server.api import create_api_call from visualdl.server.args import (ParseArgs, parse_args) from visualdl.server.log import logger from visualdl.server.template import Template -from visualdl.python.cache import MemCache - -error_retry_times = 3 -error_sleep_time = 2 # seconds SERVER_DIR = os.path.join(visualdl.ROOT, 'server') @@ -51,29 +45,6 @@ template_file_path = os.path.join(SERVER_DIR, "./dist") mock_data_path = os.path.join(SERVER_DIR, "./mock_data/") -def try_call(function, *args, **kwargs): - res = lib.retry(error_retry_times, function, error_sleep_time, *args, - **kwargs) - if not res: - logger.error("Internal server error. Retry later.") - return res - - -# status, msg, data -def gen_result(status, msg, data): - """ - :param status: - :param msg: - :param data: - :return: - """ - result = dict() - result['status'] = status - result['msg'] = msg - result['data'] = data - return result - - def create_app(args): app = Flask('visualdl', static_folder=None) # set static expires in a short time to reduce browser's memory usage. @@ -81,11 +52,8 @@ def create_app(args): app.config['BABEL_DEFAULT_LOCALE'] = default_language babel = Babel(app) - log_reader = LogReader(args.logdir) + api_call = create_api_call(args.logdir, args.cache_timeout) - # use a memory cache to reduce disk reading frequency. 
- CACHE = MemCache(timeout=args.cache_timeout) - cache_get = lib.cache_get(CACHE) update_util.PbUpdater().start() public_path = args.public_path @@ -105,16 +73,16 @@ def create_app(args): template = Template(os.path.join(server_path, template_file_path), PUBLIC_PATH=public_path.lstrip('/')) - @app.route("/") + @app.route('/') def base(): return redirect(public_path, code=302) - @app.route("/favicon.ico") + @app.route('/favicon.ico') def favicon(): icon = os.path.join(template_file_path, 'favicon.ico') if os.path.exists(icon): return send_file(icon) - return "file not found", 404 + return 'file not found', 404 @app.route(public_path + '/') def index(): @@ -127,130 +95,10 @@ def create_app(args): def serve_static(filename): return template.render(filename if re.search(r'\..+$', filename) else filename + '.html') - @app.route(api_path + "/components") - def components(): - data = cache_get('/data/components', lib.get_components, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/runs') - def runs(): - data = cache_get('/data/runs', lib.get_runs, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/tags') - def tags(): - data = cache_get('/data/tags', lib.get_tags, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/logs') - def logs(): - data = cache_get('/data/logs', lib.get_logs, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + "/scalars/tags") - def scalar_tags(): - data = cache_get("/data/plugin/scalars/tags", try_call, - lib.get_scalar_tags, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + "/images/tags") - def 
image_tags(): - data = cache_get("/data/plugin/images/tags", try_call, - lib.get_image_tags, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + "/audio/tags") - def audio_tags(): - data = cache_get("/data/plugin/audio/tags", try_call, - lib.get_audio_tags, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + "/embeddings/tags") - def embeddings_tags(): - data = cache_get("/data/plugin/embeddings/tags", try_call, - lib.get_embeddings_tags, log_reader) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/scalars/list') - def scalars(): - run = request.args.get('run') - tag = request.args.get('tag') - key = os.path.join('/data/plugin/scalars/scalars', run, tag) - data = cache_get(key, try_call, lib.get_scalar, log_reader, run, tag) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/images/list') - def images(): - mode = request.args.get('run') - tag = request.args.get('tag') - key = os.path.join('/data/plugin/images/images', mode, tag) - - data = cache_get(key, try_call, lib.get_image_tag_steps, log_reader, - mode, tag) - result = gen_result(0, "", data) - - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/images/image') - def individual_image(): - mode = request.args.get('run') - tag = request.args.get('tag') # include a index - step_index = int(request.args.get('index')) # index of step - - key = os.path.join('/data/plugin/images/individualImage', mode, tag, - str(step_index)) - data = cache_get(key, try_call, lib.get_individual_image, log_reader, - mode, tag, step_index) - return Response(data, mimetype="image/png") - - @app.route(api_path + '/embeddings/embedding') - def embeddings(): - run = 
request.args.get('run') - tag = request.args.get('tag', 'default') - dimension = request.args.get('dimension') - reduction = request.args.get('reduction') - key = os.path.join('/data/plugin/embeddings/embeddings', run, - dimension, reduction) - data = cache_get(key, try_call, lib.get_embeddings, log_reader, run, - tag, reduction, int(dimension)) - result = gen_result(0, "", data) - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/audio/list') - def audio(): - run = request.args.get('run') - tag = request.args.get('tag') - key = os.path.join('/data/plugin/audio/audio', run, tag) - - data = cache_get(key, try_call, lib.get_audio_tag_steps, log_reader, - run, tag) - result = gen_result(0, "", data) - - return Response(json.dumps(result), mimetype='application/json') - - @app.route(api_path + '/audio/audio') - def individual_audio(): - run = request.args.get('run') - tag = request.args.get('tag') # include a index - step_index = int(request.args.get('index')) # index of step - - key = os.path.join('/data/plugin/audio/individualAudio', run, tag, - str(step_index)) - data = cache_get(key, try_call, lib.get_individual_audio, log_reader, - run, tag, step_index) - response = send_file( - data, as_attachment=True, attachment_filename='audio.wav') - return response - + @app.route(api_path + '/') + def serve_api(method): + data, mimetype = api_call(method, request.args) + return make_response(Response(data, mimetype=mimetype)) return app @@ -267,13 +115,13 @@ def _open_browser(app, index_url): def _run(**kwargs): args = ParseArgs(**kwargs) - logger.info(" port=" + str(args.port)) + logger.info(' port=' + str(args.port)) app = create_app(args) if not args.api_only: - index_url = "http://" + args.host + ":" + str(args.port) + args.public_path + index_url = 'http://' + args.host + ':' + str(args.port) + args.public_path if kwargs.get('open_browser', False): threading.Thread( - target=_open_browser, kwargs={"app": app, "index_url": 
index_url}).start() + target=_open_browser, kwargs={'app': app, 'index_url': index_url}).start() app.run(debug=False, host=args.host, port=args.port, threaded=False) @@ -289,10 +137,10 @@ def run(logdir=None, **options): def main(): args = parse_args() - logger.info(" port=" + str(args.port)) + logger.info(' port=' + str(args.port)) app = create_app(args) app.run(debug=False, host=args.host, port=args.port, threaded=False) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/visualdl/server/visualDL.bat b/visualdl/server/visualDL.bat deleted file mode 100644 index 5510460b..00000000 --- a/visualdl/server/visualDL.bat +++ /dev/null @@ -1,3 +0,0 @@ -@ECHO OFF -setlocal DISABLEDELAYEDEXPANSION -python %~dp0\visualDL %* -- GitLab